[llvm-branch-commits] [llvm] bfc4f29 - [RISCV] Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2).

Craig Topper via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Nov 30 08:48:12 PST 2020


Author: Craig Topper
Date: 2020-11-30T08:42:46-08:00
New Revision: bfc4f29f46b4961c0532cf734f5d15015b4f7120

URL: https://github.com/llvm/llvm-project/commit/bfc4f29f46b4961c0532cf734f5d15015b4f7120
DIFF: https://github.com/llvm/llvm-project/commit/bfc4f29f46b4961c0532cf734f5d15015b4f7120.diff

LOG: [RISCV] Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2).

Unlike GREVI stages, GORCI stages can't be undone, but they are
redundant when applied more than once, so the shift amounts combine
with OR rather than XOR.

Differential Revision: https://reviews.llvm.org/D92295
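
For intuition, here is a minimal standalone C++ sketch of the reference
gorc semantics (following the usual Bitmanip pseudocode; the gorc32
helper and the test constants are illustrative assumptions, not part of
this patch) showing why composed GORCI stages merge with OR while GREVI
stages merge with XOR:

    #include <cassert>
    #include <cstdint>
    using std::uint32_t;

    static uint32_t gorc32(uint32_t X, unsigned ShAmt) {
      // Each set bit of ShAmt enables one stage that ORs every bit with
      // its partner at distance 1, 2, 4, 8, or 16.
      if (ShAmt & 1)
        X |= ((X & 0x55555555u) << 1) | ((X & 0xAAAAAAAAu) >> 1);
      if (ShAmt & 2)
        X |= ((X & 0x33333333u) << 2) | ((X & 0xCCCCCCCCu) >> 2);
      if (ShAmt & 4)
        X |= ((X & 0x0F0F0F0Fu) << 4) | ((X & 0xF0F0F0F0u) >> 4);
      if (ShAmt & 8)
        X |= ((X & 0x00FF00FFu) << 8) | ((X & 0xFF00FF00u) >> 8);
      if (ShAmt & 16)
        X |= ((X & 0x0000FFFFu) << 16) | ((X & 0xFFFF0000u) >> 16);
      return X;
    }

    int main() {
      uint32_t X = 0x12345678u;
      // Composing two gorc stages equals one gorc with the OR of the
      // shift amounts, which is exactly the combine this patch performs.
      assert(gorc32(gorc32(X, 2), 1) == gorc32(X, 1 | 2));
      // A repeated stage is redundant (idempotent) rather than
      // self-inverse, so unlike GREVI the amounts merge with OR, not XOR.
      assert(gorc32(gorc32(X, 2), 2) == gorc32(X, 2));
      return 0;
    }

Since every stage is idempotent and the stages commute, any chain of
GORCI operations collapses to a single GORCI whose shift amount is the
OR of the individual amounts.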

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rv32Zbp.ll
    llvm/test/CodeGen/RISCV/rv64Zbp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f1c25512000e..49c81284eba7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1303,27 +1303,33 @@ static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
   return SDValue();
 }
 
-static SDValue combineGREVI(SDNode *N, SelectionDAG &DAG,
-                            const RISCVSubtarget &Subtarget) {
-  // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
-  // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
-  uint64_t ShAmt1 = N->getConstantOperandVal(1);
-  SDValue GREVSrc = N->getOperand(0);
-
-  if (GREVSrc->getOpcode() != N->getOpcode())
+// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
+// non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
+// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
+// stage does not undo itself, but it is redundant.
+static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
+  unsigned ShAmt1 = N->getConstantOperandVal(1);
+  SDValue Src = N->getOperand(0);
+
+  if (Src.getOpcode() != N->getOpcode())
     return SDValue();
 
-  uint64_t ShAmt2 = GREVSrc.getConstantOperandVal(1);
-  GREVSrc = GREVSrc->getOperand(0);
+  unsigned ShAmt2 = Src.getConstantOperandVal(1);
+  Src = Src.getOperand(0);
+
+  unsigned CombinedShAmt;
+  if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
+    CombinedShAmt = ShAmt1 | ShAmt2;
+  else
+    CombinedShAmt = ShAmt1 ^ ShAmt2;
 
-  uint64_t CombinedShAmt = ShAmt1 ^ ShAmt2;
   if (CombinedShAmt == 0)
-    return GREVSrc;
+    return Src;
 
   SDLoc DL(N);
-  return DAG.getNode(
-      N->getOpcode(), DL, N->getValueType(0), GREVSrc,
-      DAG.getTargetConstant(CombinedShAmt, DL, Subtarget.getXLenVT()));
+  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
+                     DAG.getTargetConstant(CombinedShAmt, DL,
+                                           N->getOperand(1).getValueType()));
 }
 
 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
@@ -1422,11 +1428,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       return SDValue(N, 0);
     }
 
-    if (N->getOpcode() == RISCVISD::GREVIW)
-      if (SDValue V = combineGREVI(N, DCI.DAG, Subtarget))
-        return V;
-
-    break;
+    return combineGREVI_GORCI(N, DCI.DAG);
   }
   case RISCVISD::FMV_X_ANYEXTW_RV64: {
     SDLoc DL(N);
@@ -1459,7 +1461,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                        DAG.getConstant(~SignBit, DL, MVT::i64));
   }
   case RISCVISD::GREVI:
-    return combineGREVI(N, DCI.DAG, Subtarget);
+  case RISCVISD::GORCI:
+    return combineGREVI_GORCI(N, DCI.DAG);
   case ISD::OR:
     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
       return GREV;

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
index f1b8629f021d..6115f2217fa6 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
@@ -154,6 +154,118 @@ define i64 @gorc2_i64(i64 %a) nounwind {
   ret i64 %or2
 }
 
+define i32 @gorc3_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc3_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    lui a2, 699051
+; RV32I-NEXT:    addi a2, a2, -1366
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    lui a3, 349525
+; RV32I-NEXT:    addi a3, a3, 1365
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    lui a2, 838861
+; RV32I-NEXT:    addi a2, a2, -820
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc3_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc.n a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc3_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc.n a0, a0
+; RV32IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc3_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc3_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 1
+; RV32I-NEXT:    slli a3, a0, 1
+; RV32I-NEXT:    lui a4, 699051
+; RV32I-NEXT:    addi a4, a4, -1366
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a0, 1
+; RV32I-NEXT:    srli a5, a1, 1
+; RV32I-NEXT:    lui a3, 349525
+; RV32I-NEXT:    addi a3, a3, 1365
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a6
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    slli a3, a1, 2
+; RV32I-NEXT:    lui a4, 838861
+; RV32I-NEXT:    addi a4, a4, -820
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 2
+; RV32I-NEXT:    srli a5, a0, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc3_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc.n a0, a0
+; RV32IB-NEXT:    orc.n a1, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc3_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc.n a0, a0
+; RV32IBP-NEXT:    orc.n a1, a1
+; RV32IBP-NEXT:    ret
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
 define i32 @gorc4_i32(i32 %a) nounwind {
 ; RV32I-LABEL: gorc4_i32:
 ; RV32I:       # %bb.0:
@@ -169,27 +281,389 @@ define i32 @gorc4_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
-; RV32IB-LABEL: gorc4_i32:
+; RV32IB-LABEL: gorc4_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc4.b a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc4_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc4.b a0, a0
+; RV32IBP-NEXT:    ret
+  %and = shl i32 %a, 4
+  %shl = and i32 %and, -252645136
+  %and1 = lshr i32 %a, 4
+  %shr = and i32 %and1, 252645135
+  %or = or i32 %shr, %a
+  %or2 = or i32 %or, %shl
+  ret i32 %or2
+}
+
+define i64 @gorc4_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc4_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 4
+; RV32I-NEXT:    slli a3, a1, 4
+; RV32I-NEXT:    lui a4, 986895
+; RV32I-NEXT:    addi a4, a4, 240
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 4
+; RV32I-NEXT:    srli a5, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc4_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc4.b a0, a0
+; RV32IB-NEXT:    orc4.b a1, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc4_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc4.b a0, a0
+; RV32IBP-NEXT:    orc4.b a1, a1
+; RV32IBP-NEXT:    ret
+  %and = shl i64 %a, 4
+  %shl = and i64 %and, -1085102592571150096
+  %and1 = lshr i64 %a, 4
+  %shr = and i64 %and1, 1085102592571150095
+  %or = or i64 %shr, %a
+  %or2 = or i64 %or, %shl
+  ret i64 %or2
+}
+
+define i32 @gorc5_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc5_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    lui a2, 699051
+; RV32I-NEXT:    addi a2, a2, -1366
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    lui a3, 349525
+; RV32I-NEXT:    addi a3, a3, 1365
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    lui a2, 986895
+; RV32I-NEXT:    addi a2, a2, 240
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc5_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    gorci a0, a0, 5
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc5_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    gorci a0, a0, 5
+; RV32IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 4
+  %shl2 = and i32 %and2, -252645136
+  %and2b = lshr i32 %or1b, 4
+  %shr2 = and i32 %and2b, 252645135
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc5_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc5_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 1
+; RV32I-NEXT:    slli a3, a0, 1
+; RV32I-NEXT:    lui a4, 699051
+; RV32I-NEXT:    addi a4, a4, -1366
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a0, 1
+; RV32I-NEXT:    srli a5, a1, 1
+; RV32I-NEXT:    lui a3, 349525
+; RV32I-NEXT:    addi a3, a3, 1365
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a6
+; RV32I-NEXT:    slli a2, a0, 4
+; RV32I-NEXT:    slli a3, a1, 4
+; RV32I-NEXT:    lui a4, 986895
+; RV32I-NEXT:    addi a4, a4, 240
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 4
+; RV32I-NEXT:    srli a5, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc5_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    gorci a0, a0, 5
+; RV32IB-NEXT:    gorci a1, a1, 5
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc5_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    gorci a0, a0, 5
+; RV32IBP-NEXT:    gorci a1, a1, 5
+; RV32IBP-NEXT:    ret
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 4
+  %shl2 = and i64 %and2, -1085102592571150096
+  %and2b = lshr i64 %or1b, 4
+  %shr2 = and i64 %and2b, 1085102592571150095
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+define i32 @gorc6_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc6_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    lui a2, 838861
+; RV32I-NEXT:    addi a2, a2, -820
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    lui a2, 986895
+; RV32I-NEXT:    addi a2, a2, 240
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc6_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc2.b a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc6_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc2.b a0, a0
+; RV32IBP-NEXT:    ret
+  %and1 = shl i32 %a, 2
+  %shl1 = and i32 %and1, -858993460
+  %and1b = lshr i32 %a, 2
+  %shr1 = and i32 %and1b, 858993459
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 4
+  %shl2 = and i32 %and2, -252645136
+  %and2b = lshr i32 %or1b, 4
+  %shr2 = and i32 %and2b, 252645135
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc6_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc6_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 2
+; RV32I-NEXT:    slli a3, a0, 2
+; RV32I-NEXT:    lui a4, 838861
+; RV32I-NEXT:    addi a4, a4, -820
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a0, 2
+; RV32I-NEXT:    srli a5, a1, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a6
+; RV32I-NEXT:    slli a2, a0, 4
+; RV32I-NEXT:    slli a3, a1, 4
+; RV32I-NEXT:    lui a4, 986895
+; RV32I-NEXT:    addi a4, a4, 240
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 4
+; RV32I-NEXT:    srli a5, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc6_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc2.b a0, a0
+; RV32IB-NEXT:    orc2.b a1, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc6_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc2.b a0, a0
+; RV32IBP-NEXT:    orc2.b a1, a1
+; RV32IBP-NEXT:    ret
+  %and1 = shl i64 %a, 2
+  %shl1 = and i64 %and1, -3689348814741910324
+  %and1b = lshr i64 %a, 2
+  %shr1 = and i64 %and1b, 3689348814741910323
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 4
+  %shl2 = and i64 %and2, -1085102592571150096
+  %and2b = lshr i64 %or1b, 4
+  %shr2 = and i64 %and2b, 1085102592571150095
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+define i32 @gorc7_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc7_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    lui a2, 699051
+; RV32I-NEXT:    addi a2, a2, -1366
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    lui a3, 349525
+; RV32I-NEXT:    addi a3, a3, 1365
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    lui a2, 838861
+; RV32I-NEXT:    addi a2, a2, -820
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    lui a2, 986895
+; RV32I-NEXT:    addi a2, a2, 240
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc7_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    orc4.b a0, a0
+; RV32IB-NEXT:    orc.b a0, a0
 ; RV32IB-NEXT:    ret
 ;
-; RV32IBP-LABEL: gorc4_i32:
+; RV32IBP-LABEL: gorc7_i32:
 ; RV32IBP:       # %bb.0:
-; RV32IBP-NEXT:    orc4.b a0, a0
+; RV32IBP-NEXT:    orc.b a0, a0
 ; RV32IBP-NEXT:    ret
-  %and = shl i32 %a, 4
-  %shl = and i32 %and, -252645136
-  %and1 = lshr i32 %a, 4
-  %shr = and i32 %and1, 252645135
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  %and3 = shl i32 %or2b, 4
+  %shl3 = and i32 %and3, -252645136
+  %and3b = lshr i32 %or2b, 4
+  %shr3 = and i32 %and3b, 252645135
+  %or3 = or i32 %shr3, %or2b
+  %or3b = or i32 %or3, %shl3
+  ret i32 %or3b
 }
 
-define i64 @gorc4_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc4_i64:
+define i64 @gorc7_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc7_i64:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a3, a1, 1
+; RV32I-NEXT:    lui a4, 699051
+; RV32I-NEXT:    addi a4, a4, -1366
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 1
+; RV32I-NEXT:    srli a5, a0, 1
+; RV32I-NEXT:    lui a3, 349525
+; RV32I-NEXT:    addi a3, a3, 1365
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    slli a2, a1, 2
+; RV32I-NEXT:    slli a3, a0, 2
+; RV32I-NEXT:    lui a4, 838861
+; RV32I-NEXT:    addi a4, a4, -820
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a0, 2
+; RV32I-NEXT:    srli a5, a1, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a5, a5, a3
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a6
 ; RV32I-NEXT:    slli a2, a0, 4
 ; RV32I-NEXT:    slli a3, a1, 4
 ; RV32I-NEXT:    lui a4, 986895
@@ -208,24 +682,36 @@ define i64 @gorc4_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    or a1, a1, a6
 ; RV32I-NEXT:    ret
 ;
-; RV32IB-LABEL: gorc4_i64:
+; RV32IB-LABEL: gorc7_i64:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    orc4.b a0, a0
-; RV32IB-NEXT:    orc4.b a1, a1
+; RV32IB-NEXT:    orc.b a0, a0
+; RV32IB-NEXT:    orc.b a1, a1
 ; RV32IB-NEXT:    ret
 ;
-; RV32IBP-LABEL: gorc4_i64:
+; RV32IBP-LABEL: gorc7_i64:
 ; RV32IBP:       # %bb.0:
-; RV32IBP-NEXT:    orc4.b a0, a0
-; RV32IBP-NEXT:    orc4.b a1, a1
+; RV32IBP-NEXT:    orc.b a0, a0
+; RV32IBP-NEXT:    orc.b a1, a1
 ; RV32IBP-NEXT:    ret
-  %and = shl i64 %a, 4
-  %shl = and i64 %and, -1085102592571150096
-  %and1 = lshr i64 %a, 4
-  %shr = and i64 %and1, 1085102592571150095
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  %and3 = shl i64 %or2b, 4
+  %shl3 = and i64 %and3, -1085102592571150096
+  %and3b = lshr i64 %or2b, 4
+  %shr3 = and i64 %and3b, 1085102592571150095
+  %or3 = or i64 %shr3, %or2b
+  %or3b = or i64 %or3, %shl3
+  ret i64 %or3b
 }
 
 define i32 @gorc8_i32(i32 %a) nounwind {
@@ -360,6 +846,256 @@ define i64 @gorc16_i64(i64 %a) nounwind {
   ret i64 %or2
 }
 
+; gorc2, gorc2 -> gorc2
+define i32 @gorc2b_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc2b_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    lui a2, 838861
+; RV32I-NEXT:    addi a2, a2, -820
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a3, a0, 2
+; RV32I-NEXT:    lui a4, 209715
+; RV32I-NEXT:    addi a4, a4, 819
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc2b_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc2.n a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc2b_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc2.n a0, a0
+; RV32IBP-NEXT:    ret
+  %and1 = shl i32 %a, 2
+  %shl1 = and i32 %and1, -858993460
+  %and1b = lshr i32 %a, 2
+  %shr1 = and i32 %and1b, 858993459
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+; gorc2, gorc2 -> gorc2
+define i64 @gorc2b_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc2b_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 2
+; RV32I-NEXT:    slli a3, a0, 2
+; RV32I-NEXT:    lui a4, 838861
+; RV32I-NEXT:    addi a4, a4, -820
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a7, a2, a4
+; RV32I-NEXT:    srli a5, a0, 2
+; RV32I-NEXT:    srli a3, a1, 2
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a3, a3, a2
+; RV32I-NEXT:    and a5, a5, a2
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a1, a1, a7
+; RV32I-NEXT:    or a0, a0, a6
+; RV32I-NEXT:    slli a3, a0, 2
+; RV32I-NEXT:    slli a5, a1, 2
+; RV32I-NEXT:    and a6, a5, a4
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    srli a4, a1, 2
+; RV32I-NEXT:    srli a5, a0, 2
+; RV32I-NEXT:    and a5, a5, a2
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc2b_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc2.n a0, a0
+; RV32IB-NEXT:    orc2.n a1, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc2b_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc2.n a0, a0
+; RV32IBP-NEXT:    orc2.n a1, a1
+; RV32IBP-NEXT:    ret
+  %and1 = shl i64 %a, 2
+  %shl1 = and i64 %and1, -3689348814741910324
+  %and1b = lshr i64 %a, 2
+  %shr1 = and i64 %and1b, 3689348814741910323
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+; gorc1, gorc2, gorc1 -> gorc2
+define i32 @gorc3b_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc3b_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    lui a2, 699051
+; RV32I-NEXT:    addi a2, a2, -1366
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a3, a0, 1
+; RV32I-NEXT:    lui a4, 349525
+; RV32I-NEXT:    addi a4, a4, 1365
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    lui a3, 838861
+; RV32I-NEXT:    addi a3, a3, -820
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    srli a3, a0, 2
+; RV32I-NEXT:    lui a5, 209715
+; RV32I-NEXT:    addi a5, a5, 819
+; RV32I-NEXT:    and a3, a3, a5
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc3b_i32:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc.n a0, a0
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc3b_i32:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc.n a0, a0
+; RV32IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  %and3 = shl i32 %or2b, 1
+  %shl3 = and i32 %and3, -1431655766
+  %and3b = lshr i32 %or2b, 1
+  %shr3 = and i32 %and3b, 1431655765
+  %or3 = or i32 %shr3, %or2b
+  %or3b = or i32 %or3, %shl3
+  ret i32 %or3b
+}
+
+; gorc1, gorc2, gorc1 -> gorc2
+define i64 @gorc3b_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc3b_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a3, a1, 1
+; RV32I-NEXT:    lui a4, 699051
+; RV32I-NEXT:    addi a4, a4, -1366
+; RV32I-NEXT:    and a6, a3, a4
+; RV32I-NEXT:    and a7, a2, a4
+; RV32I-NEXT:    srli a5, a1, 1
+; RV32I-NEXT:    srli a3, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a3, a3, a2
+; RV32I-NEXT:    and a5, a5, a2
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    or a0, a0, a7
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    slli a6, a1, 2
+; RV32I-NEXT:    slli a5, a0, 2
+; RV32I-NEXT:    lui a3, 838861
+; RV32I-NEXT:    addi a3, a3, -820
+; RV32I-NEXT:    and a7, a5, a3
+; RV32I-NEXT:    and a6, a6, a3
+; RV32I-NEXT:    srli t0, a0, 2
+; RV32I-NEXT:    srli a3, a1, 2
+; RV32I-NEXT:    lui a5, 209715
+; RV32I-NEXT:    addi a5, a5, 819
+; RV32I-NEXT:    and a3, a3, a5
+; RV32I-NEXT:    and a5, t0, a5
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    or a0, a0, a7
+; RV32I-NEXT:    slli a3, a0, 1
+; RV32I-NEXT:    slli a5, a1, 1
+; RV32I-NEXT:    and a6, a5, a4
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    srli a4, a1, 1
+; RV32I-NEXT:    srli a5, a0, 1
+; RV32I-NEXT:    and a5, a5, a2
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a1, a6
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: gorc3b_i64:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orc.n a0, a0
+; RV32IB-NEXT:    orc.n a1, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: gorc3b_i64:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    orc.n a0, a0
+; RV32IBP-NEXT:    orc.n a1, a1
+; RV32IBP-NEXT:    ret
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  %and3 = shl i64 %or2b, 1
+  %shl3 = and i64 %and3, -6148914691236517206
+  %and3b = lshr i64 %or2b, 1
+  %shr3 = and i64 %and3b, 6148914691236517205
+  %or3 = or i64 %shr3, %or2b
+  %or3b = or i64 %or3, %shl3
+  ret i64 %or3b
+}
+
 define i32 @grev1_i32(i32 %a) nounwind {
 ; RV32I-LABEL: grev1_i32:
 ; RV32I:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbp.ll b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
index 367255d1c3f0..1198d03e65d6 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
@@ -168,6 +168,131 @@ define i64 @gorc2_i64(i64 %a) nounwind {
   ret i64 %or2
 }
 
+define signext i32 @gorc3_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc3_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 699051
+; RV64I-NEXT:    addiw a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 349525
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 205
+; RV64I-NEXT:    addiw a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 209715
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc3_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 3
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc3_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 3
+; RV64IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc3_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc3_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 1026731
+; RV64I-NEXT:    addiw a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 21845
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 1035469
+; RV64I-NEXT:    addiw a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 13107
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc3_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    orc.n a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc3_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    orc.n a0, a0
+; RV64IBP-NEXT:    ret
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
 define signext i32 @gorc4_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: gorc4_i32:
 ; RV64I:       # %bb.0:
@@ -175,38 +300,445 @@ define signext i32 @gorc4_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    lui a2, 241
 ; RV64I-NEXT:    addiw a2, a2, -241
 ; RV64I-NEXT:    slli a2, a2, 12
-; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc4_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 4
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc4_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 4
+; RV64IBP-NEXT:    ret
+  %and = shl i32 %a, 4
+  %shl = and i32 %and, -252645136
+  %and1 = lshr i32 %a, 4
+  %shr = and i32 %and1, 252645135
+  %or = or i32 %shr, %a
+  %or2 = or i32 %or, %shl
+  ret i32 %or2
+}
+
+define i64 @gorc4_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc4_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1044721
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 3855
+; RV64I-NEXT:    addiw a3, a3, 241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc4_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    orc4.b a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc4_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    orc4.b a0, a0
+; RV64IBP-NEXT:    ret
+  %and = shl i64 %a, 4
+  %shl = and i64 %and, -1085102592571150096
+  %and1 = lshr i64 %a, 4
+  %shr = and i64 %and1, 1085102592571150095
+  %or = or i64 %shr, %a
+  %or2 = or i64 %or, %shl
+  ret i64 %or2
+}
+
+define signext i32 @gorc5_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc5_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 699051
+; RV64I-NEXT:    addiw a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 349525
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    lui a2, 241
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc5_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 5
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc5_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 5
+; RV64IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 4
+  %shl2 = and i32 %and2, -252645136
+  %and2b = lshr i32 %or1b, 4
+  %shr2 = and i32 %and2b, 252645135
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc5_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc5_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 1026731
+; RV64I-NEXT:    addiw a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 21845
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1044721
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 3855
+; RV64I-NEXT:    addiw a3, a3, 241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc5_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorci a0, a0, 5
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc5_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorci a0, a0, 5
+; RV64IBP-NEXT:    ret
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 4
+  %shl2 = and i64 %and2, -1085102592571150096
+  %and2b = lshr i64 %or1b, 4
+  %shr2 = and i64 %and2b, 1085102592571150095
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+define signext i32 @gorc6_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc6_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 838861
+; RV64I-NEXT:    addiw a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 209715
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    lui a2, 241
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc6_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 6
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc6_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 6
+; RV64IBP-NEXT:    ret
+  %and1 = shl i32 %a, 2
+  %shl1 = and i32 %and1, -858993460
+  %and1b = lshr i32 %a, 2
+  %shr1 = and i32 %and1b, 858993459
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 4
+  %shl2 = and i32 %and2, -252645136
+  %and2b = lshr i32 %or1b, 4
+  %shr2 = and i32 %and2b, 252645135
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc6_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc6_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 1035469
+; RV64I-NEXT:    addiw a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 13107
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1044721
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 3855
+; RV64I-NEXT:    addiw a3, a3, 241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 241
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc6_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    orc2.b a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc6_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    orc2.b a0, a0
+; RV64IBP-NEXT:    ret
+  %and1 = shl i64 %a, 2
+  %shl1 = and i64 %and1, -3689348814741910324
+  %and1b = lshr i64 %a, 2
+  %shr1 = and i64 %and1b, 3689348814741910323
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 4
+  %shl2 = and i64 %and2, -1085102592571150096
+  %and2b = lshr i64 %or1b, 4
+  %shr2 = and i64 %and2b, 1085102592571150095
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+define signext i32 @gorc7_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc7_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 699051
+; RV64I-NEXT:    addiw a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 349525
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 838861
+; RV64I-NEXT:    addiw a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 209715
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    lui a2, 241
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc7_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 7
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc7_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 7
+; RV64IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  %and3 = shl i32 %or2b, 4
+  %shl3 = and i32 %and3, -252645136
+  %and3b = lshr i32 %or2b, 4
+  %shr3 = and i32 %and3b, 252645135
+  %or3 = or i32 %shr3, %or2b
+  %or3b = or i32 %or3, %shl3
+  ret i32 %or3b
+}
+
+define i64 @gorc7_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc7_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 1026731
+; RV64I-NEXT:    addiw a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 21845
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 1035469
+; RV64I-NEXT:    addiw a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -820
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 13107
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, 819
 ; RV64I-NEXT:    and a2, a2, a3
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64IB-LABEL: gorc4_i32:
-; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    gorciw a0, a0, 4
-; RV64IB-NEXT:    ret
-;
-; RV64IBP-LABEL: gorc4_i32:
-; RV64IBP:       # %bb.0:
-; RV64IBP-NEXT:    gorciw a0, a0, 4
-; RV64IBP-NEXT:    ret
-  %and = shl i32 %a, 4
-  %shl = and i32 %and, -252645136
-  %and1 = lshr i32 %a, 4
-  %shr = and i32 %and1, 252645135
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc4_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc4_i64:
-; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a0, 4
 ; RV64I-NEXT:    lui a2, 1044721
 ; RV64I-NEXT:    addiw a2, a2, -241
@@ -231,22 +763,34 @@ define i64 @gorc4_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64IB-LABEL: gorc4_i64:
+; RV64IB-LABEL: gorc7_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    orc4.b a0, a0
+; RV64IB-NEXT:    orc.b a0, a0
 ; RV64IB-NEXT:    ret
 ;
-; RV64IBP-LABEL: gorc4_i64:
+; RV64IBP-LABEL: gorc7_i64:
 ; RV64IBP:       # %bb.0:
-; RV64IBP-NEXT:    orc4.b a0, a0
+; RV64IBP-NEXT:    orc.b a0, a0
 ; RV64IBP-NEXT:    ret
-  %and = shl i64 %a, 4
-  %shl = and i64 %and, -1085102592571150096
-  %and1 = lshr i64 %a, 4
-  %shr = and i64 %and1, 1085102592571150095
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  %and3 = shl i64 %or2b, 4
+  %shl3 = and i64 %and3, -1085102592571150096
+  %and3b = lshr i64 %or2b, 4
+  %shr3 = and i64 %and3b, 1085102592571150095
+  %or3 = or i64 %shr3, %or2b
+  %or3b = or i64 %or3, %shl3
+  ret i64 %or3b
 }
 
 define signext i32 @gorc8_i32(i32 signext %a) nounwind {
@@ -417,6 +961,268 @@ define i64 @gorc32(i64 %a) nounwind {
   ret i64 %or2
 }
 
+; gorc2, gorc2 -> gorc2
+define signext i32 @gorc2b_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc2b_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 838861
+; RV64I-NEXT:    addiw a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 209715
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 205
+; RV64I-NEXT:    addiw a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc2b_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 2
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc2b_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 2
+; RV64IBP-NEXT:    ret
+  %and1 = shl i32 %a, 2
+  %shl1 = and i32 %and1, -858993460
+  %and1b = lshr i32 %a, 2
+  %shr1 = and i32 %and1b, 858993459
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+; gorc2, gorc2 -> gorc2
+define i64 @gorc2b_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc2b_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 1035469
+; RV64I-NEXT:    addiw a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -819
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a3, a0, 2
+; RV64I-NEXT:    lui a4, 13107
+; RV64I-NEXT:    addiw a4, a4, 819
+; RV64I-NEXT:    slli a4, a4, 12
+; RV64I-NEXT:    addi a4, a4, 819
+; RV64I-NEXT:    slli a4, a4, 12
+; RV64I-NEXT:    addi a4, a4, 819
+; RV64I-NEXT:    slli a4, a4, 12
+; RV64I-NEXT:    addi a4, a4, 819
+; RV64I-NEXT:    and a3, a3, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc2b_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    orc2.n a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc2b_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    orc2.n a0, a0
+; RV64IBP-NEXT:    ret
+  %and1 = shl i64 %a, 2
+  %shl1 = and i64 %and1, -3689348814741910324
+  %and1b = lshr i64 %a, 2
+  %shr1 = and i64 %and1b, 3689348814741910323
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+; gorc1, gorc2, gorc1 -> gorc2
+define signext i32 @gorc3b_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc3b_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 699051
+; RV64I-NEXT:    addiw a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    lui a3, 349525
+; RV64I-NEXT:    addiw a3, a3, 1365
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a2, 838861
+; RV64I-NEXT:    addiw a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a4, 209715
+; RV64I-NEXT:    addiw a4, a4, 819
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 171
+; RV64I-NEXT:    addiw a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc3b_i32:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    gorciw a0, a0, 3
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc3b_i32:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    gorciw a0, a0, 3
+; RV64IBP-NEXT:    ret
+  %and1 = shl i32 %a, 1
+  %shl1 = and i32 %and1, -1431655766
+  %and1b = lshr i32 %a, 1
+  %shr1 = and i32 %and1b, 1431655765
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 2
+  %shl2 = and i32 %and2, -858993460
+  %and2b = lshr i32 %or1b, 2
+  %shr2 = and i32 %and2b, 858993459
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  %and3 = shl i32 %or2b, 1
+  %shl3 = and i32 %and3, -1431655766
+  %and3b = lshr i32 %or2b, 1
+  %shr3 = and i32 %and3b, 1431655765
+  %or3 = or i32 %shr3, %or2b
+  %or3b = or i32 %or3, %shl3
+  ret i32 %or3b
+}
+
+; gorc1, gorc2, gorc1 -> gorc2
+define i64 @gorc3b_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc3b_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    lui a2, 1026731
+; RV64I-NEXT:    addiw a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1365
+; RV64I-NEXT:    slli a2, a2, 12
+; RV64I-NEXT:    addi a2, a2, -1366
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a3, a0, 1
+; RV64I-NEXT:    lui a4, 21845
+; RV64I-NEXT:    addiw a4, a4, 1365
+; RV64I-NEXT:    slli a4, a4, 12
+; RV64I-NEXT:    addi a4, a4, 1365
+; RV64I-NEXT:    slli a4, a4, 12
+; RV64I-NEXT:    addi a4, a4, 1365
+; RV64I-NEXT:    slli a4, a4, 12
+; RV64I-NEXT:    addi a4, a4, 1365
+; RV64I-NEXT:    and a3, a3, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    lui a3, 1035469
+; RV64I-NEXT:    addiw a3, a3, -819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -819
+; RV64I-NEXT:    slli a3, a3, 12
+; RV64I-NEXT:    addi a3, a3, -820
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    srli a3, a0, 2
+; RV64I-NEXT:    lui a5, 13107
+; RV64I-NEXT:    addiw a5, a5, 819
+; RV64I-NEXT:    slli a5, a5, 12
+; RV64I-NEXT:    addi a5, a5, 819
+; RV64I-NEXT:    slli a5, a5, 12
+; RV64I-NEXT:    addi a5, a5, 819
+; RV64I-NEXT:    slli a5, a5, 12
+; RV64I-NEXT:    addi a5, a5, 819
+; RV64I-NEXT:    and a3, a3, a5
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: gorc3b_i64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    orc.n a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: gorc3b_i64:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    orc.n a0, a0
+; RV64IBP-NEXT:    ret
+  %and1 = shl i64 %a, 1
+  %shl1 = and i64 %and1, -6148914691236517206
+  %and1b = lshr i64 %a, 1
+  %shr1 = and i64 %and1b, 6148914691236517205
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 2
+  %shl2 = and i64 %and2, -3689348814741910324
+  %and2b = lshr i64 %or1b, 2
+  %shr2 = and i64 %and2b, 3689348814741910323
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  %and3 = shl i64 %or2b, 1
+  %shl3 = and i64 %and3, -6148914691236517206
+  %and3b = lshr i64 %or2b, 1
+  %shr3 = and i64 %and3b, 6148914691236517205
+  %or3 = or i64 %shr3, %or2b
+  %or3b = or i64 %or3, %shl3
+  ret i64 %or3b
+}
+
 define signext i32 @grev1_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: grev1_i32:
 ; RV64I:       # %bb.0:
