[llvm] 7b5a0e2 - [RISCV] Move shift ComplexPatterns and custom isel to PatFrags with predicates

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 5 11:40:10 PST 2021


Author: Craig Topper
Date: 2021-01-05T11:37:48-08:00
New Revision: 7b5a0e2f88eedc1123f4027552940bdf1ab6c03e

URL: https://github.com/llvm/llvm-project/commit/7b5a0e2f88eedc1123f4027552940bdf1ab6c03e
DIFF: https://github.com/llvm/llvm-project/commit/7b5a0e2f88eedc1123f4027552940bdf1ab6c03e.diff

LOG: [RISCV] Move shift ComplexPatterns and custom isel to PatFrags with predicates

ComplexPatterns are kind of weird: they don't call any of the predicates on their operands, and their "complexity", used for tablegen ordering purposes in the matcher table, is hand-specified.

This started as an attempt to just use sext_inreg + SLOIPat to implement SLOIW just to have one less Select function. The matching for the or+shl is the same as long as you know the immediate is less than 32 for SLOIW. But that didn't work out because using uimm5 with SLOIPat didn't do anything if it was a ComplexPattern.

I realized I could just use a PatFrag with the opcodes I wanted to match and an immediate predicate would then evaluate correctly. This also computes the complexity just like any other pattern does. Then I just needed to check the constraints on the immediates in the predicate. Conveniently the predicate is evaluated after the fragment has been matched. So the structure has already been checked, we just need to find the constants.

I'll note that this is unusual: I didn't find any other targets looking through operands in a PatFrag predicate. There is a PredicateCodeUsesOperands feature that can be used to collect the operands into an array that is used by AMDGPU/VOP3Instructions.td. I believe that feature exists to handle commuted matching, but since the nodes here use constants, they aren't ever commuted.

Differential Revision: https://reviews.llvm.org/D91901

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoB.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 2a815863a81c..e332a3c90b42 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -49,17 +49,6 @@ static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
   return Result;
 }
 
-// Returns true if the Node is an ISD::AND with a constant argument. If so,
-// set Mask to that constant value.
-static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
-  if (Node->getOpcode() == ISD::AND &&
-      Node->getOperand(1).getOpcode() == ISD::Constant) {
-    Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
-    return true;
-  }
-  return false;
-}
-
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -121,27 +110,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
     return;
   }
-  case ISD::SRL: {
-    if (!Subtarget->is64Bit())
-      break;
-    SDNode *Op0 = Node->getOperand(0).getNode();
-    uint64_t Mask;
-    // Match (srl (and val, mask), imm) where the result would be a
-    // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
-    // is equivalent to this (SimplifyDemandedBits may have removed lower bits
-    // from the mask that aren't necessary due to the right-shifting).
-    if (isa<ConstantSDNode>(Node->getOperand(1)) && isConstantMask(Op0, Mask)) {
-      uint64_t ShAmt = Node->getConstantOperandVal(1);
-
-      if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
-        SDValue ShAmtVal = CurDAG->getTargetConstant(ShAmt, DL, XLenVT);
-        CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0->getOperand(0),
-                             ShAmtVal);
-        return;
-      }
-    }
-    break;
-  }
   case ISD::INTRINSIC_W_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
     switch (IntNo) {
@@ -238,198 +206,147 @@ bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
   return false;
 }
 
-// Check that it is a SLOI (Shift Left Ones Immediate). We first check that
-// it is the right node tree:
+// Match (srl (and val, mask), imm) where the result would be a
+// zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
+// is equivalent to this (SimplifyDemandedBits may have removed lower bits
+// from the mask that aren't necessary due to the right-shifting).
+bool RISCVDAGToDAGISel::MatchSRLIW(SDNode *N) const {
+  assert(N->getOpcode() == ISD::SRL);
+  assert(N->getOperand(0).getOpcode() == ISD::AND);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  // The IsRV64 predicate is checked after PatFrag predicates so we can get
+  // here even on RV32.
+  if (!Subtarget->is64Bit())
+    return false;
+
+  SDValue And = N->getOperand(0);
+  uint64_t ShAmt = N->getConstantOperandVal(1);
+  uint64_t Mask = And.getConstantOperandVal(1);
+  return (Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff;
+}
+
+// Check that it is a SLOI (Shift Left Ones Immediate). A PatFrag has already
+// determined it has the right structure:
 //
 //  (OR (SHL RS1, VC2), VC1)
 //
-// and then we check that VC1, the mask used to fill with ones, is compatible
+// Check that VC1, the mask used to fill with ones, is compatible
 // with VC2, the shamt:
 //
-//  VC1 == maskTrailingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  MVT XLenVT = Subtarget->getXLenVT();
-  if (N.getOpcode() == ISD::OR) {
-    SDValue Or = N;
-    if (Or.getOperand(0).getOpcode() == ISD::SHL) {
-      SDValue Shl = Or.getOperand(0);
-      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
-          isa<ConstantSDNode>(Or.getOperand(1))) {
-        if (XLenVT == MVT::i64) {
-          uint64_t VC1 = Or.getConstantOperandVal(1);
-          uint64_t VC2 = Shl.getConstantOperandVal(1);
-          if (VC1 == maskTrailingOnes<uint64_t>(VC2)) {
-            RS1 = Shl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                           Shl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-        if (XLenVT == MVT::i32) {
-          uint32_t VC1 = Or.getConstantOperandVal(1);
-          uint32_t VC2 = Shl.getConstantOperandVal(1);
-          if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
-            RS1 = Shl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                           Shl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-      }
-    }
+//  VC1 == maskTrailingOnes(VC2)
+//
+bool RISCVDAGToDAGISel::MatchSLOI(SDNode *N) const {
+  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOperand(0).getOpcode() == ISD::SHL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  SDValue Shl = N->getOperand(0);
+  if (Subtarget->is64Bit()) {
+    uint64_t VC1 = N->getConstantOperandVal(1);
+    uint64_t VC2 = Shl.getConstantOperandVal(1);
+    return VC1 == maskTrailingOnes<uint64_t>(VC2);
   }
-  return false;
+
+  uint32_t VC1 = N->getConstantOperandVal(1);
+  uint32_t VC2 = Shl.getConstantOperandVal(1);
+  return VC1 == maskTrailingOnes<uint32_t>(VC2);
 }
 
-// Check that it is a SROI (Shift Right Ones Immediate). We first check that
-// it is the right node tree:
+// Check that it is a SROI (Shift Right Ones Immediate). A PatFrag has already
+// determined it has the right structure:
 //
 //  (OR (SRL RS1, VC2), VC1)
 //
-// and then we check that VC1, the mask used to fill with ones, is compatible
+// Check that VC1, the mask used to fill with ones, is compatible
 // with VC2, the shamt:
 //
-//  VC1 == maskLeadingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  MVT XLenVT = Subtarget->getXLenVT();
-  if (N.getOpcode() == ISD::OR) {
-    SDValue Or = N;
-    if (Or.getOperand(0).getOpcode() == ISD::SRL) {
-      SDValue Srl = Or.getOperand(0);
-      if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
-          isa<ConstantSDNode>(Or.getOperand(1))) {
-        if (XLenVT == MVT::i64) {
-          uint64_t VC1 = Or.getConstantOperandVal(1);
-          uint64_t VC2 = Srl.getConstantOperandVal(1);
-          if (VC1 == maskLeadingOnes<uint64_t>(VC2)) {
-            RS1 = Srl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                           Srl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-        if (XLenVT == MVT::i32) {
-          uint32_t VC1 = Or.getConstantOperandVal(1);
-          uint32_t VC2 = Srl.getConstantOperandVal(1);
-          if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
-            RS1 = Srl.getOperand(0);
-            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                           Srl.getOperand(1).getValueType());
-            return true;
-          }
-        }
-      }
-    }
-  }
-  return false;
-}
-
-// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
-// on RV64).
-// SLLIUW is the same as SLLI except for the fact that it clears the bits
-// XLEN-1:32 of the input RS1 before shifting.
-// We first check that it is the right node tree:
-//
-//  (AND (SHL RS1, VC2), VC1)
-//
-// We check that VC2, the shamt is less than 32, otherwise the pattern is
-// exactly the same as SLLI and we give priority to that.
-// Eventually we check that that VC1, the mask used to clear the upper 32 bits
-// of RS1, is correct:
+//  VC1 == maskLeadingOnes(VC2)
 //
-//  VC1 == (0xFFFFFFFF << VC2)
-
-bool RISCVDAGToDAGISel::SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  if (N.getOpcode() == ISD::AND && Subtarget->getXLenVT() == MVT::i64) {
-    SDValue And = N;
-    if (And.getOperand(0).getOpcode() == ISD::SHL) {
-      SDValue Shl = And.getOperand(0);
-      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
-          isa<ConstantSDNode>(And.getOperand(1))) {
-        uint64_t VC1 = And.getConstantOperandVal(1);
-        uint64_t VC2 = Shl.getConstantOperandVal(1);
-        if (VC2 < 32 && VC1 == ((uint64_t)0xFFFFFFFF << VC2)) {
-          RS1 = Shl.getOperand(0);
-          Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                            Shl.getOperand(1).getValueType());
-          return true;
-        }
-      }
-    }
+bool RISCVDAGToDAGISel::MatchSROI(SDNode *N) const {
+  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOperand(0).getOpcode() == ISD::SRL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  SDValue Srl = N->getOperand(0);
+  if (Subtarget->is64Bit()) {
+    uint64_t VC1 = N->getConstantOperandVal(1);
+    uint64_t VC2 = Srl.getConstantOperandVal(1);
+    return VC1 == maskLeadingOnes<uint64_t>(VC2);
   }
-  return false;
+
+  uint32_t VC1 = N->getConstantOperandVal(1);
+  uint32_t VC2 = Srl.getConstantOperandVal(1);
+  return VC1 == maskLeadingOnes<uint32_t>(VC2);
 }
 
-// Check that it is a SLOIW (Shift Left Ones Immediate i32 on RV64).
-// We first check that it is the right node tree:
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64). A PatFrag
+// has already determined it has the right structure:
 //
-//  (SIGN_EXTEND_INREG (OR (SHL RS1, VC2), VC1))
+//  (OR (SRL RS1, VC2), VC1)
 //
 // and then we check that VC1, the mask used to fill with ones, is compatible
 // with VC2, the shamt:
 //
 //  VC2 < 32
-//  VC1 == maskTrailingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  assert(Subtarget->is64Bit() && "SLOIW should only be matched on RV64");
-  if (N.getOpcode() != ISD::SIGN_EXTEND_INREG ||
-      cast<VTSDNode>(N.getOperand(1))->getVT() != MVT::i32)
+//  VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
+//
+bool RISCVDAGToDAGISel::MatchSROIW(SDNode *N) const {
+  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOperand(0).getOpcode() == ISD::SRL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  // The IsRV64 predicate is checked after PatFrag predicates so we can get
+  // here even on RV32.
+  if (!Subtarget->is64Bit())
     return false;
 
-   SDValue Or = N.getOperand(0);
-
-   if (Or.getOpcode() != ISD::OR || !isa<ConstantSDNode>(Or.getOperand(1)))
-     return false;
-
-   SDValue Shl = Or.getOperand(0);
-   if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)))
-     return false;
-
-   uint64_t VC1 = Or.getConstantOperandVal(1);
-   uint64_t VC2 = Shl.getConstantOperandVal(1);
-
-   if (VC2 >= 32 || VC1 != maskTrailingOnes<uint64_t>(VC2))
-     return false;
+  SDValue Srl = N->getOperand(0);
+  uint64_t VC1 = N->getConstantOperandVal(1);
+  uint64_t VC2 = Srl.getConstantOperandVal(1);
 
-  RS1 = Shl.getOperand(0);
-  Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                    Shl.getOperand(1).getValueType());
-  return true;
+  // Immediate range should be enforced by uimm5 predicate.
+  assert(VC2 < 32 && "Unexpected immediate");
+  return VC1 == maskTrailingZeros<uint64_t>(32 - VC2);
 }
 
-// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
-// We first check that it is the right node tree:
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+// SLLIUW is the same as SLLI except for the fact that it clears the bits
+// XLEN-1:32 of the input RS1 before shifting.
+// A PatFrag has already checked that it has the right structure:
 //
-//  (OR (SRL RS1, VC2), VC1)
+//  (AND (SHL RS1, VC2), VC1)
 //
-// and then we check that VC1, the mask used to fill with ones, is compatible
-// with VC2, the shamt:
+// We check that VC2, the shamt is less than 32, otherwise the pattern is
+// exactly the same as SLLI and we give priority to that.
+// Eventually we check that VC1, the mask used to clear the upper 32 bits
+// of RS1, is correct:
 //
-//  VC2 < 32
-//  VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
+//  VC1 == (0xFFFFFFFF << VC2)
 //
-bool RISCVDAGToDAGISel::SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
-  assert(Subtarget->is64Bit() && "SROIW should only be matched on RV64");
-  if (N.getOpcode() != ISD::OR || !isa<ConstantSDNode>(N.getOperand(1)))
-    return false;
-
-  SDValue Srl = N.getOperand(0);
-  if (Srl.getOpcode() != ISD::SRL || !isa<ConstantSDNode>(Srl.getOperand(1)))
+bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
+  assert(N->getOpcode() == ISD::AND);
+  assert(N->getOperand(0).getOpcode() == ISD::SHL);
+  assert(isa<ConstantSDNode>(N->getOperand(1)));
+  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+  // The IsRV64 predicate is checked after PatFrag predicates so we can get
+  // here even on RV32.
+  if (!Subtarget->is64Bit())
     return false;
 
-  uint64_t VC1 = N.getConstantOperandVal(1);
-  uint64_t VC2 = Srl.getConstantOperandVal(1);
+  SDValue Shl = N->getOperand(0);
+  uint64_t VC1 = N->getConstantOperandVal(1);
+  uint64_t VC2 = Shl.getConstantOperandVal(1);
 
-  if (VC2 >= 32 || VC1 != maskTrailingZeros<uint64_t>(32 - VC2))
-    return false;
-
-  RS1 = Srl.getOperand(0);
-  Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
-                                    Srl.getOperand(1).getValueType());
-  return true;
+  // Immediate range should be enforced by uimm5 predicate.
+  assert(VC2 < 32 && "Unexpected immediate");
+  return VC1 == ((uint64_t)0xFFFFFFFF << VC2);
 }
 
 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 34c8f1c824f5..43efb6d26b96 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -45,11 +45,12 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
 
   bool SelectAddrFI(SDValue Addr, SDValue &Base);
 
-  bool SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt);
-  bool SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt);
+  bool MatchSRLIW(SDNode *N) const;
+  bool MatchSLOI(SDNode *N) const;
+  bool MatchSROI(SDNode *N) const;
+  bool MatchSROIW(SDNode *N) const;
+  bool MatchSLLIUW(SDNode *N) const;
+
   bool selectVSplat(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 98f8935d8a05..4aee8ae39cc2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -850,6 +850,11 @@ def zexti32 : PatFrags<(ops node:$src),
                        [(and node:$src, 0xffffffff),
                         (assertzexti32 node:$src)]>;
 
+def SRLIWPat : PatFrag<(ops node:$A, node:$B),
+                       (srl (and node:$A, imm), node:$B), [{
+  return MatchSRLIW(N);
+}]>;
+
 /// Immediates
 
 def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
@@ -1168,8 +1173,8 @@ def : Pat<(sext_inreg (sub GPR:$rs1, GPR:$rs2), i32),
           (SUBW GPR:$rs1, GPR:$rs2)>;
 def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
           (SLLIW GPR:$rs1, uimm5:$shamt)>;
-// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
-// need to undo manipulation of the mask value performed by DAGCombine.
+def : Pat<(SRLIWPat GPR:$rs1, uimm5:$shamt),
+          (SRLIW GPR:$rs1, uimm5:$shamt)>;
 def : Pat<(srl (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt),
           (SRLIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;
 def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index 40dbe4e8f9a8..ce6cb6ba82ce 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -59,6 +59,31 @@ def ImmROTL2RW : SDNodeXForm<imm, [{
                                    N->getValueType(0));
 }]>;
 
+// Check that it is a SLOI (Shift Left Ones Immediate).
+def SLOIPat : PatFrag<(ops node:$A, node:$B),
+                      (or (shl node:$A, node:$B), imm), [{
+  return MatchSLOI(N);
+}]>;
+
+// Check that it is a SROI (Shift Right Ones Immediate).
+def SROIPat : PatFrag<(ops node:$A, node:$B),
+                      (or (srl node:$A, node:$B), imm), [{
+  return MatchSROI(N);
+}]>;
+
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
+def SROIWPat : PatFrag<(ops node:$A, node:$B),
+                       (or (srl node:$A, node:$B), imm), [{
+  return MatchSROIW(N);
+}]>;
+
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+def SLLIUWPat : PatFrag<(ops node:$A, node:$B),
+                        (and (shl node:$A, node:$B), imm), [{
+  return MatchSLLIUW(N);
+}]>;
+
 // Checks if this mask has a single 0 bit and cannot be used with ANDI.
 def SBCLRMask : ImmLeaf<XLenVT, [{
   if (Subtarget->is64Bit())
@@ -691,11 +716,6 @@ def : CompressPat<(PACK GPRC:$rs1, GPRC:$rs1, X0),
 //===----------------------------------------------------------------------===//
 // Codegen patterns
 //===----------------------------------------------------------------------===//
-def SLOIPat   : ComplexPattern<XLenVT, 2, "SelectSLOI", [or]>;
-def SROIPat   : ComplexPattern<XLenVT, 2, "SelectSROI", [or]>;
-def SLLIUWPat : ComplexPattern<i64, 2, "SelectSLLIUW", [and]>;
-def SLOIWPat  : ComplexPattern<i64, 2, "SelectSLOIW", [sext_inreg]>;
-def SROIWPat  : ComplexPattern<i64, 2, "SelectSROIW", [or]>;
 
 let Predicates = [HasStdExtZbbOrZbp] in {
 def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
@@ -900,8 +920,8 @@ def : Pat<(or (or (and (shl GPR:$rs1, (i64 1)), (i64 0x4444444444444444)),
 let Predicates = [HasStdExtZbb, IsRV64] in {
 def : Pat<(and (add GPR:$rs, simm12:$simm12), (i64 0xFFFFFFFF)),
           (ADDIWU GPR:$rs, simm12:$simm12)>;
-def : Pat<(SLLIUWPat GPR:$rs1, uimmlog2xlen:$shamt),
-          (SLLIUW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(SLLIUWPat GPR:$rs1, uimm5:$shamt),
+          (SLLIUW GPR:$rs1, uimm5:$shamt)>;
 def : Pat<(and (add GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
           (ADDWU GPR:$rs1, GPR:$rs2)>;
 def : Pat<(and (sub GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
@@ -956,10 +976,10 @@ def : Pat<(xor (assertsexti32 GPR:$rs1), SBSETINVWMask:$mask),
 } // Predicates = [HasStdExtZbs, IsRV64]
 
 let Predicates = [HasStdExtZbb, IsRV64] in {
-def : Pat<(SLOIWPat GPR:$rs1, uimmlog2xlen:$shamt),
-          (SLOIW GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(SROIWPat GPR:$rs1, uimmlog2xlen:$shamt),
-          (SROIW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(sext_inreg (SLOIPat GPR:$rs1, uimm5:$shamt), i32),
+          (SLOIW GPR:$rs1, uimm5:$shamt)>;
+def : Pat<(SROIWPat GPR:$rs1, uimm5:$shamt),
+          (SROIW GPR:$rs1, uimm5:$shamt)>;
 } // Predicates = [HasStdExtZbb, IsRV64]
 
 let Predicates = [HasStdExtZbp, IsRV64] in {


        


More information about the llvm-commits mailing list