[llvm] 182aa0c - [RISCV] Remove support for the unratified Zbp extension.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 21 21:23:04 PDT 2022


Author: Craig Topper
Date: 2022-09-21T21:22:42-07:00
New Revision: 182aa0cbe0cd86dbf91e9d0e647eec736461f0b7

URL: https://github.com/llvm/llvm-project/commit/182aa0cbe0cd86dbf91e9d0e647eec736461f0b7
DIFF: https://github.com/llvm/llvm-project/commit/182aa0cbe0cd86dbf91e9d0e647eec736461f0b7.diff

LOG: [RISCV] Remove support for the unratified Zbp extension.

This extension does not appear to be on its way to ratification.

Some follow-up is still needed to simplify the RISCVISD nodes.

Added: 
    llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbkb.ll
    llvm/test/MC/RISCV/rv32zbb-only-valid.s

Modified: 
    clang/include/clang/Basic/BuiltinsRISCV.def
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/Driver/riscv-arch.c
    clang/test/Preprocessor/riscv-target-features.c
    llvm/docs/RISCVUsage.rst
    llvm/docs/ReleaseNotes.rst
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCV.td
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
    llvm/lib/Target/RISCV/RISCVSchedRocket.td
    llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
    llvm/lib/Target/RISCV/RISCVScheduleB.td
    llvm/lib/Target/RISCV/RISCVSubtarget.h
    llvm/test/CodeGen/RISCV/attributes.ll
    llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
    llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll
    llvm/test/MC/RISCV/attribute-arch-invalid.s
    llvm/test/MC/RISCV/attribute-arch.s
    llvm/test/MC/RISCV/rv32zbb-invalid.s
    llvm/test/MC/RISCV/rv32zbb-valid.s
    llvm/test/MC/RISCV/rv32zbkx-invalid.s
    llvm/test/MC/RISCV/rv64zbb-invalid.s
    llvm/test/MC/RISCV/rv64zbb-valid.s

Removed: 
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbp.c
    clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbp.c
    llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv32zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbp.ll
    llvm/test/MC/RISCV/rv32zbbp-invalid.s
    llvm/test/MC/RISCV/rv32zbbp-only-valid.s
    llvm/test/MC/RISCV/rv32zbbp-valid.s
    llvm/test/MC/RISCV/rv32zbp-aliases-valid.s
    llvm/test/MC/RISCV/rv32zbp-invalid.s
    llvm/test/MC/RISCV/rv32zbp-only-valid.s
    llvm/test/MC/RISCV/rv32zbp-valid.s
    llvm/test/MC/RISCV/rv64zbbp-invalid.s
    llvm/test/MC/RISCV/rv64zbbp-valid.s
    llvm/test/MC/RISCV/rv64zbp-aliases-valid.s
    llvm/test/MC/RISCV/rv64zbp-invalid.s
    llvm/test/MC/RISCV/rv64zbp-valid.s


################################################################################
diff  --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def
index e71e1e439115..f51cba95a9f5 100644
--- a/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -45,20 +45,6 @@ TARGET_BUILTIN(__builtin_riscv_bdecompress_64, "WiWiWi", "nc",
 TARGET_BUILTIN(__builtin_riscv_bfp_32, "ZiZiZi", "nc", "experimental-zbf")
 TARGET_BUILTIN(__builtin_riscv_bfp_64, "WiWiWi", "nc", "experimental-zbf,64bit")
 
-// Zbp extension
-TARGET_BUILTIN(__builtin_riscv_grev_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_grev_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_gorc_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_gorc_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_shfl_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_shfl_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_unshfl_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_unshfl_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_xperm_n, "LiLiLi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_xperm_b, "LiLiLi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_xperm_h, "LiLiLi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_xperm_w, "WiWiWi", "nc", "experimental-zbp,64bit")
-
 // Zbr extension
 TARGET_BUILTIN(__builtin_riscv_crc32_b, "LiLi", "nc", "experimental-zbr")
 TARGET_BUILTIN(__builtin_riscv_crc32_h, "LiLi", "nc", "experimental-zbr")
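
For context on what is being deleted: the grev/gorc builtins above modeled the draft Zbp "generalized reverse" and "generalized OR-combine" instructions. A minimal C++ sketch of the 32-bit grev semantics, adapted from the 0.93 bitmanip draft (illustrative only, not LLVM code):

  #include <cstdint>

  // grev32: each set bit k of the control swaps adjacent 2^k-bit blocks.
  uint32_t grev32(uint32_t x, uint32_t shamt) {
    if (shamt & 1)  x = ((x & 0x55555555u) << 1)  | ((x & 0xAAAAAAAAu) >> 1);
    if (shamt & 2)  x = ((x & 0x33333333u) << 2)  | ((x & 0xCCCCCCCCu) >> 2);
    if (shamt & 4)  x = ((x & 0x0F0F0F0Fu) << 4)  | ((x & 0xF0F0F0F0u) >> 4);
    if (shamt & 8)  x = ((x & 0x00FF00FFu) << 8)  | ((x & 0xFF00FF00u) >> 8);
    if (shamt & 16) x = ((x & 0x0000FFFFu) << 16) | ((x & 0xFFFF0000u) >> 16);
    return x;
  }

grev(x, 24) is a byte swap, grev(x, 31) a full bit reverse, and grev(x, 7) the per-byte reverse that Zbkb keeps as brev8; those special cases explain the lowering changes later in this patch.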

diff  --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 77a2c19a025a..43361b54d693 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -19192,20 +19192,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
   case RISCV::BI__builtin_riscv_bdecompress_64:
   case RISCV::BI__builtin_riscv_bfp_32:
   case RISCV::BI__builtin_riscv_bfp_64:
-  case RISCV::BI__builtin_riscv_grev_32:
-  case RISCV::BI__builtin_riscv_grev_64:
-  case RISCV::BI__builtin_riscv_gorc_32:
-  case RISCV::BI__builtin_riscv_gorc_64:
-  case RISCV::BI__builtin_riscv_shfl_32:
-  case RISCV::BI__builtin_riscv_shfl_64:
-  case RISCV::BI__builtin_riscv_unshfl_32:
-  case RISCV::BI__builtin_riscv_unshfl_64:
   case RISCV::BI__builtin_riscv_xperm4:
   case RISCV::BI__builtin_riscv_xperm8:
-  case RISCV::BI__builtin_riscv_xperm_n:
-  case RISCV::BI__builtin_riscv_xperm_b:
-  case RISCV::BI__builtin_riscv_xperm_h:
-  case RISCV::BI__builtin_riscv_xperm_w:
   case RISCV::BI__builtin_riscv_crc32_b:
   case RISCV::BI__builtin_riscv_crc32_h:
   case RISCV::BI__builtin_riscv_crc32_w:
@@ -19262,36 +19250,6 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
       ID = Intrinsic::riscv_bfp;
       break;
 
-    // Zbp
-    case RISCV::BI__builtin_riscv_grev_32:
-    case RISCV::BI__builtin_riscv_grev_64:
-      ID = Intrinsic::riscv_grev;
-      break;
-    case RISCV::BI__builtin_riscv_gorc_32:
-    case RISCV::BI__builtin_riscv_gorc_64:
-      ID = Intrinsic::riscv_gorc;
-      break;
-    case RISCV::BI__builtin_riscv_shfl_32:
-    case RISCV::BI__builtin_riscv_shfl_64:
-      ID = Intrinsic::riscv_shfl;
-      break;
-    case RISCV::BI__builtin_riscv_unshfl_32:
-    case RISCV::BI__builtin_riscv_unshfl_64:
-      ID = Intrinsic::riscv_unshfl;
-      break;
-    case RISCV::BI__builtin_riscv_xperm_n:
-      ID = Intrinsic::riscv_xperm_n;
-      break;
-    case RISCV::BI__builtin_riscv_xperm_b:
-      ID = Intrinsic::riscv_xperm_b;
-      break;
-    case RISCV::BI__builtin_riscv_xperm_h:
-      ID = Intrinsic::riscv_xperm_h;
-      break;
-    case RISCV::BI__builtin_riscv_xperm_w:
-      ID = Intrinsic::riscv_xperm_w;
-      break;
-
     // Zbr
     case RISCV::BI__builtin_riscv_crc32_b:
       ID = Intrinsic::riscv_crc32_b;
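
The deleted gorc mapping had the same staged structure as grev, but each stage ORs in the swapped bits instead of replacing them. A hedged sketch of gorc32, again following the 0.93 draft:

  #include <cstdint>

  // gorc32: like grev32, but OR-combining, so set bits propagate within blocks.
  uint32_t gorc32(uint32_t x, uint32_t shamt) {
    if (shamt & 1)  x |= ((x & 0x55555555u) << 1)  | ((x & 0xAAAAAAAAu) >> 1);
    if (shamt & 2)  x |= ((x & 0x33333333u) << 2)  | ((x & 0xCCCCCCCCu) >> 2);
    if (shamt & 4)  x |= ((x & 0x0F0F0F0Fu) << 4)  | ((x & 0xF0F0F0F0u) >> 4);
    if (shamt & 8)  x |= ((x & 0x00FF00FFu) << 8)  | ((x & 0xFF00FF00u) >> 8);
    if (shamt & 16) x |= ((x & 0x0000FFFFu) << 16) | ((x & 0xFFFF0000u) >> 16);
    return x;
  }

gorc(x, 7) sets every bit of each byte that has any bit set; that case was ratified separately as Zbb's orc.b, which is why only the general form disappears here.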

diff  --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbp.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbp.c
deleted file mode 100644
index dccd870292ce..000000000000
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbp.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -no-opaque-pointers -triple riscv32 -target-feature +experimental-zbp -emit-llvm %s -o - \
-// RUN:     | FileCheck %s  -check-prefix=RV32ZBP
-
-// RV32ZBP-LABEL: @grev(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long grev(long rs1, long rs2)
-{
-  return __builtin_riscv_grev_32(rs1, rs2);
-}
-
-// RV32ZBP-LABEL: @grevi(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 13)
-// RV32ZBP-NEXT:    ret i32 [[TMP1]]
-//
-long grevi(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_grev_32(rs1, i);
-}
-
-// RV32ZBP-LABEL: @gorc(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long gorc(long rs1, long rs2)
-{
-  return __builtin_riscv_gorc_32(rs1, rs2);
-}
-
-// RV32ZBP-LABEL: @gorci(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 13)
-// RV32ZBP-NEXT:    ret i32 [[TMP1]]
-//
-long gorci(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_gorc_32(rs1, i);
-}
-
-// RV32ZBP-LABEL: @shfl(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long shfl(long rs1, long rs2)
-{
-  return __builtin_riscv_shfl_32(rs1, rs2);
-}
-
-// RV32ZBP-LABEL: @shfli(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 13)
-// RV32ZBP-NEXT:    ret i32 [[TMP1]]
-//
-long shfli(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_shfl_32(rs1, i);
-}
-
-// RV32ZBP-LABEL: @unshfl(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long unshfl(long rs1, long rs2)
-{
-  return __builtin_riscv_unshfl_32(rs1, rs2);
-}
-
-// RV32ZBP-LABEL: @unshfli(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 13)
-// RV32ZBP-NEXT:    ret i32 [[TMP1]]
-//
-long unshfli(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_unshfl_32(rs1, i);
-}
-
-// RV32ZBP-LABEL: @xperm_n(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm.n.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long xperm_n(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_n(rs1, rs2);
-}
-
-// RV32ZBP-LABEL: @xperm_b(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm.b.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long xperm_b(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_b(rs1, rs2);
-}
-
-// RV32ZBP-LABEL: @xperm_h(
-// RV32ZBP-NEXT:  entry:
-// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm.h.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBP-NEXT:    ret i32 [[TMP2]]
-//
-long xperm_h(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_h(rs1, rs2);
-}
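
The shfl/unshfl builtins exercised by this deleted test implemented the draft's butterfly shuffle; unshfl inverts shfl by running the stages in the opposite order. A rough sketch of the 32-bit semantics, adapted from the 0.93 draft (not LLVM code):

  #include <cstdint>

  // One butterfly stage: exchange the N-bit fields selected by maskL/maskR.
  static uint32_t shuffle32_stage(uint32_t src, uint32_t maskL, uint32_t maskR,
                                  unsigned N) {
    uint32_t x = src & ~(maskL | maskR);
    x |= ((src << N) & maskL) | ((src >> N) & maskR);
    return x;
  }

  uint32_t shfl32(uint32_t rs1, uint32_t rs2) {
    uint32_t x = rs1;
    unsigned shamt = rs2 & 15;
    if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000u, 0x0000ff00u, 8);
    if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00u, 0x00f000f0u, 4);
    if (shamt & 2) x = shuffle32_stage(x, 0x30303030u, 0x0c0c0c0cu, 2);
    if (shamt & 1) x = shuffle32_stage(x, 0x44444444u, 0x22222222u, 1);
    return x;
  }

  uint32_t unshfl32(uint32_t rs1, uint32_t rs2) {
    uint32_t x = rs1;
    unsigned shamt = rs2 & 15;
    if (shamt & 1) x = shuffle32_stage(x, 0x44444444u, 0x22222222u, 1);
    if (shamt & 2) x = shuffle32_stage(x, 0x30303030u, 0x0c0c0c0cu, 2);
    if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00u, 0x00f000f0u, 4);
    if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000u, 0x0000ff00u, 8);
    return x;
  }

The zip/unzip intrinsics kept for Zbkb correspond to shfl/unshfl with every stage bit set, matching the (BitWidth / 2) - 1 constant in the lowering code further down.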

diff  --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbp.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbp.c
deleted file mode 100644
index 2643eb9a20dc..000000000000
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbp.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +experimental-zbp -emit-llvm %s -o - \
-// RUN:     | FileCheck %s  -check-prefix=RV64ZBP
-
-// RV64ZBP-LABEL: @grev(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.grev.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long grev(long rs1, long rs2)
-{
-  return __builtin_riscv_grev_64(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @grevi(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.grev.i64(i64 [[TMP0]], i64 13)
-// RV64ZBP-NEXT:    ret i64 [[TMP1]]
-//
-long grevi(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_grev_64(rs1, i);
-}
-
-// RV64ZBP-LABEL: @grevw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBP-NEXT:    ret i32 [[TMP2]]
-//
-int grevw(int rs1, int rs2)
-{
-  return __builtin_riscv_grev_32(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @greviw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 13)
-// RV64ZBP-NEXT:    ret i32 [[TMP1]]
-//
-int greviw(int rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_grev_32(rs1, i);
-}
-
-// RV64ZBP-LABEL: @gorc(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.gorc.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long gorc(long rs1, long rs2)
-{
-  return __builtin_riscv_gorc_64(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @gorci(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.gorc.i64(i64 [[TMP0]], i64 13)
-// RV64ZBP-NEXT:    ret i64 [[TMP1]]
-//
-long gorci(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_gorc_64(rs1, i);
-}
-
-// RV64ZBP-LABEL: @gorcw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBP-NEXT:    ret i32 [[TMP2]]
-//
-int gorcw(int rs1, int rs2)
-{
-  return __builtin_riscv_gorc_32(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @gorciw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 13)
-// RV64ZBP-NEXT:    ret i32 [[TMP1]]
-//
-int gorciw(int rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_gorc_32(rs1, i);
-}
-
-// RV64ZBP-LABEL: @shfl(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.shfl.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long shfl(long rs1, long rs2)
-{
-  return __builtin_riscv_shfl_64(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @shfli(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.shfl.i64(i64 [[TMP0]], i64 13)
-// RV64ZBP-NEXT:    ret i64 [[TMP1]]
-//
-long shfli(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_shfl_64(rs1, i);
-}
-
-// RV64ZBP-LABEL: @shflw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBP-NEXT:    ret i32 [[TMP2]]
-//
-int shflw(int rs1, int rs2)
-{
-  return __builtin_riscv_shfl_32(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @shfli_NOw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 13)
-// RV64ZBP-NEXT:    ret i32 [[TMP1]]
-//
-int shfli_NOw(int rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_shfl_32(rs1, i);
-}
-
-// RV64ZBP-LABEL: @unshfl(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.unshfl.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long unshfl(long rs1, long rs2)
-{
-  return __builtin_riscv_unshfl_64(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @unshfli(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.unshfl.i64(i64 [[TMP0]], i64 13)
-// RV64ZBP-NEXT:    ret i64 [[TMP1]]
-//
-long unshfli(long rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_unshfl_64(rs1, i);
-}
-
-// RV64ZBP-LABEL: @unshflw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBP-NEXT:    ret i32 [[TMP2]]
-//
-int unshflw(int rs1, int rs2)
-{
-  return __builtin_riscv_unshfl_32(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @unshfli_NOw(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
-// RV64ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    store i32 13, i32* [[I]], align 4
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 13)
-// RV64ZBP-NEXT:    ret i32 [[TMP1]]
-//
-int unshfli_NOw(int rs1)
-{
-  const int i = 13;
-  return __builtin_riscv_unshfl_32(rs1, i);
-}
-
-// RV64ZBP-LABEL: @xperm_n(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.n.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long xperm_n(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_n(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @xperm_b(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.b.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long xperm_b(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_b(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @xperm_h(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.h.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long xperm_h(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_h(rs1, rs2);
-}
-
-// RV64ZBP-LABEL: @xperm_w(
-// RV64ZBP-NEXT:  entry:
-// RV64ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBP-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
-// RV64ZBP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.xperm.w.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBP-NEXT:    ret i64 [[TMP2]]
-//
-long xperm_w(long rs1, long rs2)
-{
-  return __builtin_riscv_xperm_w(rs1, rs2);
-}
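
The deleted xperm builtins implemented a crossbar lookup: each small index field of rs2 selects the corresponding field of rs1, and out-of-range indices produce zero. A sketch of the generic form from the draft, where sz_log2 is 2 for xperm.n (nibbles), 3 for xperm.b (bytes), 4 for xperm.h, and 5 for xperm.w (assuming XLEN == 64 for simplicity):

  #include <cstdint>

  // Generic xperm for XLEN == 64; sz <= 32 for the forms above.
  uint64_t xperm(uint64_t rs1, uint64_t rs2, unsigned sz_log2) {
    uint64_t r = 0;
    unsigned sz = 1u << sz_log2;
    uint64_t mask = (uint64_t(1) << sz) - 1;
    for (unsigned i = 0; i < 64; i += sz) {
      uint64_t pos = ((rs2 >> i) & mask) << sz_log2; // bit position of the field
      if (pos < 64)
        r |= ((rs1 >> pos) & mask) << i;             // table lookup into rs1
    }
    return r;
  }

Note that the 4-bit and 8-bit forms survive in Zbkx as xperm4/xperm8, which is why those builtin cases remain in CGBuiltin.cpp above.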

diff  --git a/clang/test/Driver/riscv-arch.c b/clang/test/Driver/riscv-arch.c
index edfa659a16d0..37bd7e29895b 100644
--- a/clang/test/Driver/riscv-arch.c
+++ b/clang/test/Driver/riscv-arch.c
@@ -437,14 +437,9 @@
 // RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-ZBB %s
 // RV32-ZBB: "-target-feature" "+zbb"
 
-// RUN: %clang --target=riscv32-unknown-elf -march=rv32izbb1p0_zbp0p93 -menable-experimental-extensions -### %s \
-// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-EXPERIMENTAL-ZBB-ZBP %s
-// RV32-EXPERIMENTAL-ZBB-ZBP: "-target-feature" "+zbb"
-// RV32-EXPERIMENTAL-ZBB-ZBP: "-target-feature" "+experimental-zbp"
-
-// RUN: %clang --target=riscv32-unknown-elf -march=rv32izbb1p0zbp0p93 -menable-experimental-extensions -### %s \
-// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-EXPERIMENTAL-ZBB-ZBP-UNDERSCORE %s
-// RV32-EXPERIMENTAL-ZBB-ZBP-UNDERSCORE: error: invalid arch name 'rv32izbb1p0zbp0p93', unsupported version number 0.93 for extension 'zbb1p0zbp'
+// RUN: %clang --target=riscv32-unknown-elf -march=rv32izbb1p0zbs1p0 -menable-experimental-extensions -### %s \
+// RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-EXPERIMENTAL-ZBB-ZBS-UNDERSCORE %s
+// RV32-EXPERIMENTAL-ZBB-ZBS-UNDERSCORE: error: invalid arch name 'rv32izbb1p0zbs1p0', unsupported version number 1.0 for extension 'zbb1p0zbs'
 
 // RUN: %clang --target=riscv32-unknown-elf -march=rv32izba1p0 -### %s \
 // RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-ZBA %s

diff  --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 39ab684e575b..a925c45b3cb7 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -25,7 +25,6 @@
 // CHECK-NOT: __riscv_zbe
 // CHECK-NOT: __riscv_zbf
 // CHECK-NOT: __riscv_zbm
-// CHECK-NOT: __riscv_zbp
 // CHECK-NOT: __riscv_zbr
 // CHECK-NOT: __riscv_zbs
 // CHECK-NOT: __riscv_zbt
@@ -191,15 +190,6 @@
 // CHECK-ZBM-NOT: __riscv_b
 // CHECK-ZBM-EXT: __riscv_zbm 93000{{$}}
 
-// RUN: %clang -target riscv32-unknown-linux-gnu -menable-experimental-extensions \
-// RUN: -march=rv32izbp0p93 -x c -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZBP-EXT %s
-// RUN: %clang -target riscv64-unknown-linux-gnu -menable-experimental-extensions \
-// RUN: -march=rv64izbp0p93 -x c -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZBP-EXT %s
-// CHECK-ZBP-NOT: __riscv_b
-// CHECK-ZBP-EXT: __riscv_zbp 93000{{$}}
-
 // RUN: %clang -target riscv32-unknown-linux-gnu -menable-experimental-extensions \
 // RUN: -march=rv32izbr0p93 -x c -E -dM %s \
 // RUN: -o - | FileCheck --check-prefix=CHECK-ZBR-EXT %s

diff  --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index 511ce030aa3d..754360fc0a5a 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -131,7 +131,7 @@ The primary goal of experimental support is to assist in the process of ratifica
 ``experimental-zawrs``
  LLVM implements the `1.0-rc3 draft specification <https://github.com/riscv/riscv-zawrs/releases/download/V1.0-rc3/Zawrs.pdf>`_.  Note that there have been backwards-incompatible changes made between release candidates for the 1.0 draft.
 
-``experimental-zbe``, ``experimental-zbf``, ``experimental-zbm``, ``experimental-zbp``, ``experimental-zbr``
+``experimental-zbe``, ``experimental-zbf``, ``experimental-zbm``, ``experimental-zbr``
  LLVM implements the `latest state of the bitmanip working branch <https://github.com/riscv/riscv-bitmanip/tree/main-history>`_, which is largely similar to the 0.93 draft specification but with some instruction naming changes.  These are the individual portions of the bitmanip effort that did *not* get ratified.  Given that ratification for these sub-extensions appears stalled, they are likely candidates for removal in the future.
 
 ``experimental-zca``

diff  --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index efa550ad0e8f..ca7444daad4b 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -112,6 +112,7 @@ Changes to the PowerPC Backend
 Changes to the RISC-V Backend
 -----------------------------
 
+* Support for the unratified Zbp extension has been removed.
* Support for the unratified Zbt extension has been removed.
 
 Changes to the WebAssembly Backend

diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index da1f31f90e26..3b0f64eeb38d 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -109,16 +109,6 @@ let TargetPrefix = "riscv" in {
   // Zbf
   def int_riscv_bfp  : BitManipGPRGPRIntrinsics;
 
-  // Zbp
-  def int_riscv_grev  : BitManipGPRGPRIntrinsics;
-  def int_riscv_gorc  : BitManipGPRGPRIntrinsics;
-  def int_riscv_shfl  : BitManipGPRGPRIntrinsics;
-  def int_riscv_unshfl  : BitManipGPRGPRIntrinsics;
-  def int_riscv_xperm_n  : BitManipGPRGPRIntrinsics;
-  def int_riscv_xperm_b  : BitManipGPRGPRIntrinsics;
-  def int_riscv_xperm_h  : BitManipGPRGPRIntrinsics;
-  def int_riscv_xperm_w  : BitManipGPRGPRIntrinsics;
-
   // Zbr
   def int_riscv_crc32_b : BitManipGPRIntrinsics;
   def int_riscv_crc32_h : BitManipGPRIntrinsics;

diff  --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 5455aa632336..84ff6883d0d1 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -174,13 +174,6 @@ def HasStdExtZbm : Predicate<"Subtarget->hasStdExtZbm()">,
                              AssemblerPredicate<(all_of FeatureStdExtZbm),
                              "'Zbm' (Matrix 'Zb' Instructions)">;
 
-def FeatureStdExtZbp
-    : SubtargetFeature<"experimental-zbp", "HasStdExtZbp", "true",
-                       "'Zbp' (Permutation 'Zb' Instructions)">;
-def HasStdExtZbp : Predicate<"Subtarget->hasStdExtZbp()">,
-                             AssemblerPredicate<(all_of FeatureStdExtZbp),
-                             "'Zbp' (Permutation 'Zb' Instructions)">;
-
 def FeatureStdExtZbr
     : SubtargetFeature<"experimental-zbr", "HasStdExtZbr", "true",
                        "'Zbr' (Polynomial Reduction 'Zb' Instructions)">;
@@ -195,14 +188,6 @@ def HasStdExtZbs : Predicate<"Subtarget->hasStdExtZbs()">,
                              AssemblerPredicate<(all_of FeatureStdExtZbs),
                              "'Zbs' (Single-Bit Instructions)">;
 
-// Some instructions belong to both the basic and the permutation
-// subextensions. They should be enabled if either has been specified.
-def HasStdExtZbbOrZbp
-    : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()">,
-                AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbp),
-                                   "'Zbb' (Basic Bit-Manipulation) or "
-                                   "'Zbp' (Permutation 'Zb' Instructions)">;
-
 def FeatureStdExtZbkb
     : SubtargetFeature<"zbkb", "HasStdExtZbkb", "true",
                        "'Zbkb' (Bitmanip instructions for Cryptography)">;
@@ -217,31 +202,12 @@ def HasStdExtZbkx : Predicate<"Subtarget->hasStdExtZbkx()">,
                              AssemblerPredicate<(all_of FeatureStdExtZbkx),
                              "'Zbkx' (Crossbar permutation instructions)">;
 
-def HasStdExtZbpOrZbkx
-    : Predicate<"Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkx()">,
-                AssemblerPredicate<(any_of FeatureStdExtZbp, FeatureStdExtZbkx),
-                                   "'Zbp' (Permutation 'Zb' Instructions) or "
-                                   "'Zbkx' (Crossbar permutation instructions)">;
-
-def HasStdExtZbpOrZbkb
-    : Predicate<"Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkb()">,
-                AssemblerPredicate<(any_of FeatureStdExtZbp, FeatureStdExtZbkb),
-                                   "'Zbp' (Permutation 'Zb' Instructions) or "
-                                   "'Zbkb' (Bitmanip instructions for Cryptography)">;
-
 def HasStdExtZbbOrZbkb
     : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()">,
                 AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbkb),
                                    "'Zbb' (Basic Bit-Manipulation) or "
                                    "'Zbkb' (Bitmanip instructions for Cryptography)">;
 
-def HasStdExtZbbOrZbpOrZbkb
-    : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkb()">,
-                AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbp, FeatureStdExtZbkb),
-                                   "'Zbb' (Basic Bit-Manipulation) or "
-                                   "'Zbp' (Permutation 'Zb' Instructions) or "
-                                   "'Zbkb' (Bitmanip instructions for Cryptography)">;
-
 // The Carry-less multiply subextension for cryptography is a subset of the basic carry-less multiply subextension. The former should be enabled if the latter is enabled.
 def FeatureStdExtZbkc
     : SubtargetFeature<"zbkc", "HasStdExtZbkc", "true",

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index c5c8438e68d5..f63367bf9353 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1068,8 +1068,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     // might allow it to be compressed.
     bool IsANDIOrZExt =
         isInt<12>(C2) ||
-        (C2 == UINT64_C(0xFFFF) &&
-         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
+        (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb()) ||
         (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
     if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
       break;
@@ -2510,8 +2509,6 @@ bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
   case RISCV::SUBW:
   case RISCV::MULW:
   case RISCV::SLLIW:
-  case RISCV::GREVIW:
-  case RISCV::GORCIW:
     // Result is already sign extended just remove the sext.w.
     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
     ReplaceUses(N, N0.getNode());
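
On the IsANDIOrZExt change just above: the peephole only keeps (and x, C2) when the mask still has a cheap encoding once the shift is reassociated, and with Zbp gone the 0xFFFF case is covered by Zbb alone. A sketch of the C-level forms involved (assumed equivalences, not the selector itself):

  #include <cstdint>

  // Masks the peephole treats as cheap:
  uint64_t as_andi(uint64_t x)   { return x & 0x7FF; }       // 12-bit simm: andi
  uint64_t as_zext_h(uint64_t x) { return x & 0xFFFF; }      // zext.h with Zbb
  uint64_t as_zext_w(uint64_t x) { return x & 0xFFFFFFFF; }  // zext.w with Zba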

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6cda6deadc42..8c910b8d1e5e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -254,37 +254,22 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
                      Custom);
 
-  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
-      Subtarget.hasStdExtZbkb()) {
+  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
     if (Subtarget.is64Bit())
       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
   } else {
     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
   }
 
-  if (Subtarget.hasStdExtZbp()) {
-    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
-    // more combining.
-    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);
-
-    // BSWAP i8 doesn't exist.
-    setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
-
-    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);
-
-    if (Subtarget.is64Bit())
-      setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
-  } else {
-    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
-    // pattern match it directly in isel.
-    setOperationAction(ISD::BSWAP, XLenVT,
-                       (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
-                           ? Legal
-                           : Expand);
-    // Zbkb can use rev8+brev8 to implement bitreverse.
-    setOperationAction(ISD::BITREVERSE, XLenVT,
-                       Subtarget.hasStdExtZbkb() ? Custom : Expand);
-  }
+  // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
+  // pattern match it directly in isel.
+  setOperationAction(ISD::BSWAP, XLenVT,
+                     (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
+                         ? Legal
+                         : Expand);
+  // Zbkb can use rev8+brev8 to implement bitreverse.
+  setOperationAction(ISD::BITREVERSE, XLenVT,
+                     Subtarget.hasStdExtZbkb() ? Custom : Expand);
 
   if (Subtarget.hasStdExtZbb()) {
     setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
@@ -960,9 +945,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasStdExtF())
     setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
 
-  if (Subtarget.hasStdExtZbp())
-    setTargetDAGCombine({ISD::ROTL, ISD::ROTR});
-
   if (Subtarget.hasStdExtZbb())
     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
 
@@ -1180,8 +1162,7 @@ bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
   if (VT.isVector())
     return false;
 
-  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
-          Subtarget.hasStdExtZbkb()) &&
+  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
          !isa<ConstantSDNode>(Y);
 }
 
@@ -3372,22 +3353,13 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::BITREVERSE: {
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
-    if (Subtarget.hasStdExtZbp()) {
-      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
-      // Start with the maximum immediate value which is the bitwidth - 1.
-      unsigned Imm = VT.getSizeInBits() - 1;
-      // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
-      if (Op.getOpcode() == ISD::BSWAP)
-        Imm &= ~0x7U;
-      return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
-                         DAG.getConstant(Imm, DL, VT));
-    }
     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
     // Expand bitreverse to a bswap(rev8) followed by brev8.
     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
-    // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
-    // as brev8 by an isel pattern.
+    // We use the old Zbp grevi encoding for rev.b/brev8 which will be
+    // recognized as brev8 by an isel pattern.
+    // TODO: Replace with RISCVISD::BREV8.
     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
                        DAG.getConstant(7, DL, VT));
   }
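
The retained Zbkb path relies on the identity bitreverse(x) == brev8(bswap(x)): rev8 reverses the byte order and brev8 (still encoded here as grev with immediate 7) reverses the bits within each byte. A small illustrative sketch of that decomposition (using a GCC/Clang builtin for the byte swap):

  #include <cstdint>

  // brev8: reverse the bits within each byte (the three sub-byte grev stages).
  uint32_t brev8(uint32_t x) {
    x = ((x & 0x55555555u) << 1) | ((x & 0xAAAAAAAAu) >> 1);
    x = ((x & 0x33333333u) << 2) | ((x & 0xCCCCCCCCu) >> 2);
    x = ((x & 0x0F0F0F0Fu) << 4) | ((x & 0xF0F0F0F0u) >> 4);
    return x;
  }

  // Full bit reverse = byte swap (rev8) then per-byte reverse (brev8).
  uint32_t bitreverse32(uint32_t x) {
    return brev8(__builtin_bswap32(x));
  }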
@@ -5086,12 +5058,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
                        DAG.getConstant(7, DL, XLenVT));
   }
-  case Intrinsic::riscv_grev:
-  case Intrinsic::riscv_gorc: {
-    unsigned Opc =
-        IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
-    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
-  }
   case Intrinsic::riscv_zip:
   case Intrinsic::riscv_unzip: {
     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
@@ -5103,12 +5069,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
   }
-  case Intrinsic::riscv_shfl:
-  case Intrinsic::riscv_unshfl: {
-    unsigned Opc =
-        IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
-    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
-  }
   case Intrinsic::riscv_bcompress:
   case Intrinsic::riscv_bdecompress: {
     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
@@ -7471,17 +7431,13 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     }
     break;
   }
-  case RISCVISD::GREV:
-  case RISCVISD::GORC:
-  case RISCVISD::SHFL: {
+  case RISCVISD::GREV: {
     MVT VT = N->getSimpleValueType(0);
     MVT XLenVT = Subtarget.getXLenVT();
     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
            "Unexpected custom legalisation");
     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
-    assert((Subtarget.hasStdExtZbp() ||
-            (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
-             N->getConstantOperandVal(1) == 7)) &&
+    assert(Subtarget.hasStdExtZbkb() && N->getConstantOperandVal(1) == 7 &&
            "Unexpected extension");
     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
     SDValue NewOp1 =
@@ -7492,25 +7448,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
     break;
   }
-  case ISD::BSWAP:
-  case ISD::BITREVERSE: {
-    MVT VT = N->getSimpleValueType(0);
-    MVT XLenVT = Subtarget.getXLenVT();
-    assert((VT == MVT::i8 || VT == MVT::i16 ||
-            (VT == MVT::i32 && Subtarget.is64Bit())) &&
-           Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
-    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
-    unsigned Imm = VT.getSizeInBits() - 1;
-    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
-    if (N->getOpcode() == ISD::BSWAP)
-      Imm &= ~0x7U;
-    SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
-                                DAG.getConstant(Imm, DL, XLenVT));
-    // ReplaceNodeResults requires we maintain the same type for the return
-    // value.
-    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
-    break;
-  }
   case ISD::EXTRACT_VECTOR_ELT: {
     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
     // type is illegal (currently only vXi64 RV32).
@@ -7576,28 +7513,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     default:
       llvm_unreachable(
           "Don't know how to custom type legalize this intrinsic!");
-    case Intrinsic::riscv_grev:
-    case Intrinsic::riscv_gorc: {
-      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
-             "Unexpected custom legalisation");
-      SDValue NewOp1 =
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
-      SDValue NewOp2 =
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
-      unsigned Opc =
-          IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
-      // If the control is a constant, promote the node by clearing any extra
-      // bits in the control. isel will form greviw/gorciw if the result is
-      // sign extended.
-      if (isa<ConstantSDNode>(NewOp2)) {
-        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
-                             DAG.getConstant(0x1f, DL, MVT::i64));
-        Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
-      }
-      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
-      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
-      break;
-    }
     case Intrinsic::riscv_bcompress:
     case Intrinsic::riscv_bdecompress:
     case Intrinsic::riscv_bfp: {
@@ -7615,30 +7530,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
-    case Intrinsic::riscv_shfl:
-    case Intrinsic::riscv_unshfl: {
-      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
-             "Unexpected custom legalisation");
-      SDValue NewOp1 =
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
-      SDValue NewOp2 =
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
-      unsigned Opc =
-          IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
-      // There is no (UN)SHFLIW. If the control word is a constant, we can use
-      // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
-      // will be shuffled the same way as the lower 32 bit half, but the two
-      // halves won't cross.
-      if (isa<ConstantSDNode>(NewOp2)) {
-        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
-                             DAG.getConstant(0xf, DL, MVT::i64));
-        Opc =
-            IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
-      }
-      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
-      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
-      break;
-    }
     case Intrinsic::riscv_vmv_x_s: {
       EVT VT = N->getValueType(0);
       MVT XLenVT = Subtarget.getXLenVT();
@@ -7711,118 +7602,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
   }
 }
 
-// A structure to hold one of the bit-manipulation patterns below. Together, a
-// SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
-//   (or (and (shl x, 1), 0xAAAAAAAA),
-//       (and (srl x, 1), 0x55555555))
-struct RISCVBitmanipPat {
-  SDValue Op;
-  unsigned ShAmt;
-  bool IsSHL;
-
-  bool formsPairWith(const RISCVBitmanipPat &Other) const {
-    return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
-  }
-};
-
-// Matches patterns of the form
-//   (and (shl x, C2), (C1 << C2))
-//   (and (srl x, C2), C1)
-//   (shl (and x, C1), C2)
-//   (srl (and x, (C1 << C2)), C2)
-// Where C2 is a power of 2 and C1 has at least that many leading zeroes.
-// The expected masks for each shift amount are specified in BitmanipMasks where
-// BitmanipMasks[log2(C2)] specifies the expected C1 value.
-// The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
-// BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
-// XLen is 64.
-static Optional<RISCVBitmanipPat>
-matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
-  assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
-         "Unexpected number of masks");
-  Optional<uint64_t> Mask;
-  // Optionally consume a mask around the shift operation.
-  if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
-    Mask = Op.getConstantOperandVal(1);
-    Op = Op.getOperand(0);
-  }
-  if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
-    return None;
-  bool IsSHL = Op.getOpcode() == ISD::SHL;
-
-  if (!isa<ConstantSDNode>(Op.getOperand(1)))
-    return None;
-  uint64_t ShAmt = Op.getConstantOperandVal(1);
-
-  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
-  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
-    return None;
-  // If we don't have enough masks for 64 bit, then we must be trying to
-  // match SHFL so we're only allowed to shift 1/4 of the width.
-  if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
-    return None;
-
-  SDValue Src = Op.getOperand(0);
-
-  // The expected mask is shifted left when the AND is found around SHL
-  // patterns.
-  //   ((x >> 1) & 0x55555555)
-  //   ((x << 1) & 0xAAAAAAAA)
-  bool SHLExpMask = IsSHL;
-
-  if (!Mask) {
-    // Sometimes LLVM keeps the mask as an operand of the shift, typically when
-    // the mask is all ones: consume that now.
-    if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
-      Mask = Src.getConstantOperandVal(1);
-      Src = Src.getOperand(0);
-      // The expected mask is now in fact shifted left for SRL, so reverse the
-      // decision.
-      //   ((x & 0xAAAAAAAA) >> 1)
-      //   ((x & 0x55555555) << 1)
-      SHLExpMask = !SHLExpMask;
-    } else {
-      // Use a default shifted mask of all-ones if there's no AND, truncated
-      // down to the expected width. This simplifies the logic later on.
-      Mask = maskTrailingOnes<uint64_t>(Width);
-      *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
-    }
-  }
-
-  unsigned MaskIdx = Log2_32(ShAmt);
-  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
-
-  if (SHLExpMask)
-    ExpMask <<= ShAmt;
-
-  if (Mask != ExpMask)
-    return None;
-
-  return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
-}
-
-// Matches any of the following bit-manipulation patterns:
-//   (and (shl x, 1), (0x55555555 << 1))
-//   (and (srl x, 1), 0x55555555)
-//   (shl (and x, 0x55555555), 1)
-//   (srl (and x, (0x55555555 << 1)), 1)
-// where the shift amount and mask may vary thus:
-//   [1]  = 0x55555555 / 0xAAAAAAAA
-//   [2]  = 0x33333333 / 0xCCCCCCCC
-//   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
-//   [8]  = 0x00FF00FF / 0xFF00FF00
-//   [16] = 0x0000FFFF / 0xFFFFFFFF
-//   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
-static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
-  // These are the unshifted masks which we use to match bit-manipulation
-  // patterns. They may be shifted left in certain circumstances.
-  static const uint64_t BitmanipMasks[] = {
-      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
-      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
-
-  return matchRISCVBitmanipPat(Op, BitmanipMasks);
-}
-
 // Try to fold (<bop> x, (reduction.<bop> vec, start))
 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
   auto BinOpToRVVReduce = [](unsigned Opc) {
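
To see what the removed GREVI matchers recognized, take shift amount 1: the masked shift pair computes grev(x, 1), and matchRISCVBitmanipPat validated the masks against the BitmanipMasks table indexed by log2 of the shift amount. A scalar sketch of the shape being matched (illustrative only):

  #include <cstdint>

  // (or (and (shl x, 1), 0xAAAAAAAA), (and (srl x, 1), 0x55555555))
  // exchanges each even bit with the odd bit above it, i.e. grev(x, 1).
  uint32_t grevi1(uint32_t x) {
    return ((x << 1) & 0xAAAAAAAAu) | ((x >> 1) & 0x55555555u);
  }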
@@ -7927,186 +7706,6 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
                      Extract.getValueType(), NewReduce, Extract.getOperand(1));
 }
 
-// Match the following pattern as a GREVI(W) operation
-//   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
-static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
-                               const RISCVSubtarget &Subtarget) {
-  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
-  EVT VT = Op.getValueType();
-
-  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
-    auto LHS = matchGREVIPat(Op.getOperand(0));
-    auto RHS = matchGREVIPat(Op.getOperand(1));
-    if (LHS && RHS && LHS->formsPairWith(*RHS)) {
-      SDLoc DL(Op);
-      return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
-                         DAG.getConstant(LHS->ShAmt, DL, VT));
-    }
-  }
-  return SDValue();
-}
-
-// Matches any of the following patterns as a GORCI(W) operation
-// 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
-// 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
-// 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
-// Note that with the variant of 3.,
-//     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
-// the inner pattern will first be matched as GREVI and then the outer
-// pattern will be matched to GORC via the first rule above.
-// 4.  (or (rotl/rotr x, bitwidth/2), x)
-static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
-                               const RISCVSubtarget &Subtarget) {
-  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
-  EVT VT = Op.getValueType();
-
-  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
-    SDLoc DL(Op);
-    SDValue Op0 = Op.getOperand(0);
-    SDValue Op1 = Op.getOperand(1);
-
-    auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
-      if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
-          isa<ConstantSDNode>(Reverse.getOperand(1)) &&
-          isPowerOf2_32(Reverse.getConstantOperandVal(1)))
-        return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
-      // We can also form GORCI from ROTL/ROTR by half the bitwidth.
-      if ((Reverse.getOpcode() == ISD::ROTL ||
-           Reverse.getOpcode() == ISD::ROTR) &&
-          Reverse.getOperand(0) == X &&
-          isa<ConstantSDNode>(Reverse.getOperand(1))) {
-        uint64_t RotAmt = Reverse.getConstantOperandVal(1);
-        if (RotAmt == (VT.getSizeInBits() / 2))
-          return DAG.getNode(RISCVISD::GORC, DL, VT, X,
-                             DAG.getConstant(RotAmt, DL, VT));
-      }
-      return SDValue();
-    };
-
-    // Check for either commutable permutation of (or (GREVI x, shamt), x)
-    if (SDValue V = MatchOROfReverse(Op0, Op1))
-      return V;
-    if (SDValue V = MatchOROfReverse(Op1, Op0))
-      return V;
-
-    // OR is commutable so canonicalize its OR operand to the left
-    if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
-      std::swap(Op0, Op1);
-    if (Op0.getOpcode() != ISD::OR)
-      return SDValue();
-    SDValue OrOp0 = Op0.getOperand(0);
-    SDValue OrOp1 = Op0.getOperand(1);
-    auto LHS = matchGREVIPat(OrOp0);
-    // OR is commutable so swap the operands and try again: x might have been
-    // on the left
-    if (!LHS) {
-      std::swap(OrOp0, OrOp1);
-      LHS = matchGREVIPat(OrOp0);
-    }
-    auto RHS = matchGREVIPat(Op1);
-    if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
-      return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
-                         DAG.getConstant(LHS->ShAmt, DL, VT));
-    }
-  }
-  return SDValue();
-}
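
gorc is the OR-combining sibling of grev: each stage ORs a block with its
swapped neighbor instead of exchanging them. That is also why rule 4 above
holds: the grev stage for the top control bit is exactly a rotate by half the
bitwidth, so x | rot(x, bitwidth/2) is gorc with that single bit set. A sketch
under the same draft-0.93 assumptions (gorc32 is an illustrative name):

  #include <cstdint>

  // Reference semantics of the removed GORC node at XLEN=32: like grev32,
  // but each stage ORs rather than swaps. gorc32(x, 7) is orc.b.
  static uint32_t gorc32(uint32_t X, uint32_t Shamt) {
    if (Shamt & 1)
      X |= ((X & 0x55555555) << 1) | ((X & 0xAAAAAAAA) >> 1);
    if (Shamt & 2)
      X |= ((X & 0x33333333) << 2) | ((X & 0xCCCCCCCC) >> 2);
    if (Shamt & 4)
      X |= ((X & 0x0F0F0F0F) << 4) | ((X & 0xF0F0F0F0) >> 4);
    if (Shamt & 8)
      X |= ((X & 0x00FF00FF) << 8) | ((X & 0xFF00FF00) >> 8);
    if (Shamt & 16)
      X |= ((X & 0x0000FFFF) << 16) | ((X & 0xFFFF0000) >> 16);
    return X;
  }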
-
-// Matches any of the following bit-manipulation patterns:
-//   (and (shl x, 1), (0x22222222 << 1))
-//   (and (srl x, 1), 0x22222222)
-//   (shl (and x, 0x22222222), 1)
-//   (srl (and x, (0x22222222 << 1)), 1)
-// where the shift amount and mask may vary thus:
-//   [1]  = 0x22222222 / 0x44444444
-//   [2]  = 0x0C0C0C0C / 0x30303030
-//   [4]  = 0x00F000F0 / 0x0F000F00
-//   [8]  = 0x0000FF00 / 0x00FF0000
-//   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
-static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
-  // These are the unshifted masks which we use to match bit-manipulation
-  // patterns. They may be shifted left in certain circumstances.
-  static const uint64_t BitmanipMasks[] = {
-      0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
-      0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
-
-  return matchRISCVBitmanipPat(Op, BitmanipMasks);
-}
-
-// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
-static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
-                               const RISCVSubtarget &Subtarget) {
-  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
-  EVT VT = Op.getValueType();
-
-  if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
-    return SDValue();
-
-  SDValue Op0 = Op.getOperand(0);
-  SDValue Op1 = Op.getOperand(1);
-
-  // Or is commutable so canonicalize the second OR to the LHS.
-  if (Op0.getOpcode() != ISD::OR)
-    std::swap(Op0, Op1);
-  if (Op0.getOpcode() != ISD::OR)
-    return SDValue();
-
-  // We found an inner OR, so our operands are the operands of the inner OR
-  // and the other operand of the outer OR.
-  SDValue A = Op0.getOperand(0);
-  SDValue B = Op0.getOperand(1);
-  SDValue C = Op1;
-
-  auto Match1 = matchSHFLPat(A);
-  auto Match2 = matchSHFLPat(B);
-
-  // If neither matched, we failed.
-  if (!Match1 && !Match2)
-    return SDValue();
-
-  // We had at least one match. If one failed, try the remaining C operand.
-  if (!Match1) {
-    std::swap(A, C);
-    Match1 = matchSHFLPat(A);
-    if (!Match1)
-      return SDValue();
-  } else if (!Match2) {
-    std::swap(B, C);
-    Match2 = matchSHFLPat(B);
-    if (!Match2)
-      return SDValue();
-  }
-  assert(Match1 && Match2);
-
-  // Make sure our matches pair up.
-  if (!Match1->formsPairWith(*Match2))
-    return SDValue();
-
-  // All that remains is to make sure C is an AND with the same input that
-  // masks out the bits that are being shuffled.
-  if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
-      C.getOperand(0) != Match1->Op)
-    return SDValue();
-
-  uint64_t Mask = C.getConstantOperandVal(1);
-
-  static const uint64_t BitmanipMasks[] = {
-      0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
-      0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
-  };
-
-  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
-  unsigned MaskIdx = Log2_32(Match1->ShAmt);
-  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
-
-  if (Mask != ExpMask)
-    return SDValue();
-
-  SDLoc DL(Op);
-  return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
-                     DAG.getConstant(Match1->ShAmt, DL, VT));
-}
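
For reference, each shfl stage conditionally interleaves neighboring blocks
using exactly the mask pairs tabulated in matchSHFLPat, and the masks checked
against C above are the per-stage complements (the bits that stay put). A
32-bit sketch assuming the 0.93 draft's stage order; shfl32(x, 15) is the zip
operation kept by Zbkb:

  #include <cstdint>

  // One shfl butterfly stage: bits under MaskL move left by N, bits under
  // MaskR move right by N, everything else stays in place.
  static uint32_t ShflStage32(uint32_t Src, uint32_t MaskL, uint32_t MaskR,
                              unsigned N) {
    uint32_t X = Src & ~(MaskL | MaskR);
    X |= ((Src << N) & MaskL) | ((Src >> N) & MaskR);
    return X;
  }

  static uint32_t shfl32(uint32_t X, uint32_t Shamt) {
    if (Shamt & 8)
      X = ShflStage32(X, 0x00FF0000, 0x0000FF00, 8);
    if (Shamt & 4)
      X = ShflStage32(X, 0x0F000F00, 0x00F000F0, 4);
    if (Shamt & 2)
      X = ShflStage32(X, 0x30303030, 0x0C0C0C0C, 2);
    if (Shamt & 1)
      X = ShflStage32(X, 0x44444444, 0x22222222, 1);
    return X;
  }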
-
 // Optimize (add (shl x, c0), (shl y, c1)) ->
 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
@@ -8153,63 +7752,6 @@ static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
 }
 
-// Combine
-// ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
-// ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
-// ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
-// ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
-// RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
-// ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
-// The grev patterns represent BSWAP.
-// FIXME: This can be generalized to any GREV. We just need to toggle the MSB
-// of the grev shift amount.
-static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
-                                          const RISCVSubtarget &Subtarget) {
-  bool IsWInstruction =
-      N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
-  assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
-          IsWInstruction) &&
-         "Unexpected opcode!");
-  SDValue Src = N->getOperand(0);
-  EVT VT = N->getValueType(0);
-  SDLoc DL(N);
-
-  if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
-    return SDValue();
-
-  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
-      !isa<ConstantSDNode>(Src.getOperand(1)))
-    return SDValue();
-
-  unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
-  assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
-
-  // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
-  // RORW/ROLW. And the grev should be the encoding for bswap for this width.
-  unsigned ShAmt1 = N->getConstantOperandVal(1);
-  unsigned ShAmt2 = Src.getConstantOperandVal(1);
-  if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
-    return SDValue();
-
-  Src = Src.getOperand(0);
-
-  // Toggle the MSB of the grev shift amount.
-  unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
-  if (CombinedShAmt == 0)
-    return Src;
-
-  SDValue Res = DAG.getNode(
-      RISCVISD::GREV, DL, VT, Src,
-      DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
-  if (!IsWInstruction)
-    return Res;
-
-  // Sign extend the result to match the behavior of the rotate. This will be
-  // selected to GREVIW in isel.
-  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
-                     DAG.getValueType(MVT::i32));
-}
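
The shift-amount XOR above works because grev stages are self-inverse and
independent, so composed grevs XOR their controls, and a rotate by half the
width is the grev stage for the top control bit. A self-contained check of
the RV32 case (illustrative helper names, not LLVM code):

  #include <cassert>
  #include <cstdint>

  static uint32_t Swap8(uint32_t X) {  // grev control 8
    return ((X & 0x00FF00FF) << 8) | ((X & 0xFF00FF00) >> 8);
  }
  static uint32_t Swap16(uint32_t X) { // grev control 16 == rotate by 16
    return (X << 16) | (X >> 16);
  }

  int main() {
    uint32_t X = 0x12345678;
    uint32_t Bswap = Swap16(Swap8(X));  // grev control 24, i.e. bswap
    // ROTR((GREVI x, 24), 16) == GREVI(x, 24 ^ 16) == GREVI(x, 8)
    assert(Swap16(Bswap) == Swap8(X));
    return 0;
  }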
-
 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
@@ -8535,15 +8077,6 @@ static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {
   SelectionDAG &DAG = DCI.DAG;
 
-  if (Subtarget.hasStdExtZbp()) {
-    if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
-      return GREV;
-    if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
-      return GORC;
-    if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
-      return SHFL;
-  }
-
   if (SDValue V = combineBinOpToReduce(N, DAG))
     return V;
 
@@ -9376,7 +8909,9 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
   }
   case RISCVISD::SLLW:
   case RISCVISD::SRAW:
-  case RISCVISD::SRLW: {
+  case RISCVISD::SRLW:
+  case RISCVISD::RORW:
+  case RISCVISD::ROLW: {
     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
     if (SimplifyDemandedLowBitsHelper(0, 32) ||
         SimplifyDemandedLowBitsHelper(1, 5))
@@ -9384,19 +8919,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
 
     break;
   }
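
The helper calls encode exactly what the W-form rotates read: bits [31:0] of
the value and bits [4:0] of the amount. A tiny model of RORW showing why an
explicit mask on either operand is removable (illustrative; the instruction's
sign extension of the result is omitted):

  #include <cstdint>

  // RORW rotates the low 32 bits right by Amt mod 32, so any (and Amt, 31)
  // feeding the amount is redundant; SimplifyDemandedLowBitsHelper exploits
  // exactly this.
  static uint32_t Rorw(uint32_t X, uint32_t Amt) {
    Amt &= 31;
    return Amt ? (X >> Amt) | (X << (32 - Amt)) : X;
  }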
-  case ISD::ROTR:
-  case ISD::ROTL:
-  case RISCVISD::RORW:
-  case RISCVISD::ROLW: {
-    if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
-      // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
-      if (SimplifyDemandedLowBitsHelper(0, 32) ||
-          SimplifyDemandedLowBitsHelper(1, 5))
-        return SDValue(N, 0);
-    }
-
-    return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
-  }
   case RISCVISD::CLZW:
   case RISCVISD::CTZW: {
     // Only the lower 32 bits of the first operand are read
@@ -9414,15 +8936,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
 
     return combineGREVI_GORCI(N, DAG);
   }
-  case RISCVISD::GREVW:
-  case RISCVISD::GORCW: {
-    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
-    if (SimplifyDemandedLowBitsHelper(0, 32) ||
-        SimplifyDemandedLowBitsHelper(1, 5))
-      return SDValue(N, 0);
-
-    break;
-  }
   case RISCVISD::SHFL:
   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
@@ -9433,15 +8946,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
 
     break;
   }
-  case RISCVISD::SHFLW:
-  case RISCVISD::UNSHFLW: {
-    // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
-    if (SimplifyDemandedLowBitsHelper(0, 32) ||
-        SimplifyDemandedLowBitsHelper(1, 4))
-      return SDValue(N, 0);
-
-    break;
-  }
   case RISCVISD::BCOMPRESSW:
   case RISCVISD::BDECOMPRESSW: {
     // Only the lower 32 bits of LHS and RHS are read.
@@ -10142,10 +9646,6 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
   case RISCVISD::REMUW:
   case RISCVISD::ROLW:
   case RISCVISD::RORW:
-  case RISCVISD::GREVW:
-  case RISCVISD::GORCW:
-  case RISCVISD::SHFLW:
-  case RISCVISD::UNSHFLW:
   case RISCVISD::BCOMPRESSW:
   case RISCVISD::BDECOMPRESSW:
   case RISCVISD::BFPW:
@@ -12213,13 +11713,9 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
   NODE_NAME_CASE(READ_CYCLE_WIDE)
   NODE_NAME_CASE(GREV)
-  NODE_NAME_CASE(GREVW)
   NODE_NAME_CASE(GORC)
-  NODE_NAME_CASE(GORCW)
   NODE_NAME_CASE(SHFL)
-  NODE_NAME_CASE(SHFLW)
   NODE_NAME_CASE(UNSHFL)
-  NODE_NAME_CASE(UNSHFLW)
   NODE_NAME_CASE(BFP)
   NODE_NAME_CASE(BFPW)
   NODE_NAME_CASE(BCOMPRESS)

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index d019fa3230e5..14796d8bbc24 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -114,13 +114,9 @@ enum NodeType : unsigned {
   // DAGs. Each node takes an input operand and a control operand and outputs a
   // bit-manipulated version of input. All operands are i32 or XLenVT.
   GREV,
-  GREVW,
   GORC,
-  GORCW,
   SHFL,
-  SHFLW,
   UNSHFL,
-  UNSHFLW,
   // Bit Compress/Decompress implement the generic bit extract and bit deposit
   // functions. This operation is also referred to as bit gather/scatter, bit
   // pack/unpack, parallel extract/deposit, compress/expand, or right

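The BCOMPRESS/BDECOMPRESS nodes that remain implement the parallel bit
extract and deposit the comment describes. A minimal 32-bit model, assuming
the 0.93 draft definitions (helper names are illustrative):

  #include <cstdint>

  // Gather the bits Mask selects, packing them toward bit 0.
  static uint32_t Bcompress32(uint32_t Src, uint32_t Mask) {
    uint32_t Res = 0;
    for (unsigned I = 0, J = 0; I < 32; ++I)
      if (Mask & (1u << I))
        Res |= ((Src >> I) & 1u) << J++;
    return Res;
  }

  // Scatter the low bits of Src into the positions Mask selects.
  static uint32_t Bdecompress32(uint32_t Src, uint32_t Mask) {
    uint32_t Res = 0;
    for (unsigned I = 0, J = 0; I < 32; ++I)
      if (Mask & (1u << I))
        Res |= ((Src >> J++) & 1u) << I;
    return Res;
  }
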
diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index c442af1d6c5e..85811c94f839 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -15,7 +15,6 @@
 //   Zbe - 0.93 *experimental
 //   Zbf - 0.93 *experimental
 //   Zbm - 0.93 *experimental
-//   Zbp - 0.93 *experimental
 //   Zbr - 0.93 *experimental
 //
 // The experimental extensions appeared in an earlier draft of the Bitmanip
@@ -314,14 +313,14 @@ class RVBTernaryR<bits<2> funct2, bits<3> funct3, RISCVOpcode opcode,
 // Instructions
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
+let Predicates = [HasStdExtZbbOrZbkb] in {
 def ANDN  : ALU_rr<0b0100000, 0b111, "andn">,
             Sched<[WriteIALU, ReadIALU, ReadIALU]>;
 def ORN   : ALU_rr<0b0100000, 0b110, "orn">,
             Sched<[WriteIALU, ReadIALU, ReadIALU]>;
 def XNOR  : ALU_rr<0b0100000, 0b100, "xnor">,
             Sched<[WriteIALU, ReadIALU, ReadIALU]>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
+} // Predicates = [HasStdExtZbbOrZbkb]
 
 let Predicates = [HasStdExtZba] in {
 def SH1ADD : ALU_rr<0b0010000, 0b010, "sh1add">,
@@ -345,7 +344,7 @@ def SH3ADD_UW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">,
                 Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
 } // Predicates = [HasStdExtZba, IsRV64]
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
+let Predicates = [HasStdExtZbbOrZbkb] in {
 def ROL   : ALU_rr<0b0110000, 0b001, "rol">,
             Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
 def ROR   : ALU_rr<0b0110000, 0b101, "ror">,
@@ -353,9 +352,9 @@ def ROR   : ALU_rr<0b0110000, 0b101, "ror">,
 
 def RORI  : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">,
             Sched<[WriteRotateImm, ReadRotateImm]>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
+} // Predicates = [HasStdExtZbbOrZbkb]
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
 def ROLW  : ALUW_rr<0b0110000, 0b001, "rolw">,
             Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;
 def RORW  : ALUW_rr<0b0110000, 0b101, "rorw">,
@@ -363,7 +362,7 @@ def RORW  : ALUW_rr<0b0110000, 0b101, "rorw">,
 
 def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">,
             Sched<[WriteRotateImm32, ReadRotateImm32]>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbs] in {
 def BCLR : ALU_rr<0b0100100, 0b001, "bclr">,
@@ -385,58 +384,13 @@ def BEXTI : RVBShift_ri<0b01001, 0b101, OPC_OP_IMM, "bexti">,
             Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
 } // Predicates = [HasStdExtZbs]
 
-let Predicates = [HasStdExtZbp] in {
-def GORC : ALU_rr<0b0010100, 0b101, "gorc">,
-           Sched<[WriteORC, ReadORC, ReadORC]>;
-def GREV : ALU_rr<0b0110100, 0b101, "grev">,
-           Sched<[WriteREV, ReadREV, ReadREV]>;
-
-def GREVI : RVBShift_ri<0b01101, 0b101, OPC_OP_IMM, "grevi">,
-            Sched<[WriteREVImm, ReadREVImm]>;
-def GORCI : RVBShift_ri<0b00101, 0b101, OPC_OP_IMM, "gorci">,
-            Sched<[WriteORCImm, ReadORCImm]>;
-
-def SHFL   : ALU_rr<0b0000100, 0b001, "shfl">,
-             Sched<[WriteSHFL, ReadSHFL, ReadSHFL]>;
-def UNSHFL : ALU_rr<0b0000100, 0b101, "unshfl">,
-             Sched<[WriteUNSHFL, ReadUNSHFL, ReadUNSHFL]>;
-
-def SHFLI   : RVBShfl_ri<0b0000100, 0b001, OPC_OP_IMM, "shfli">,
-              Sched<[WriteSHFLImm, ReadSHFLImm]>;
-def UNSHFLI : RVBShfl_ri<0b0000100, 0b101, OPC_OP_IMM, "unshfli">,
-              Sched<[WriteUNSHFLImm, ReadUNSHFLImm]>;
-
-def XPERM_H : ALU_rr<0b0010100, 0b110, "xperm.h">,
-              Sched<[WriteXPERMH, ReadXPERMH, ReadXPERMH]>;
-} // Predicates = [HasStdExtZbp]
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def GORCW  : ALUW_rr<0b0010100, 0b101, "gorcw">,
-             Sched<[WriteORC32, ReadORC32, ReadORC32]>;
-def GREVW  : ALUW_rr<0b0110100, 0b101, "grevw">,
-             Sched<[WriteREV32, ReadREV32, ReadREV32]>;
-
-def GORCIW : RVBShiftW_ri<0b0010100, 0b101, OPC_OP_IMM_32, "gorciw">,
-             Sched<[WriteREVImm32, ReadREVImm32]>;
-def GREVIW : RVBShiftW_ri<0b0110100, 0b101, OPC_OP_IMM_32, "greviw">,
-             Sched<[WriteORCImm32, ReadORCImm32]>;
-
-def SHFLW   : ALUW_rr<0b0000100, 0b001, "shflw">,
-              Sched<[WriteSHFL32, ReadSHFL32, ReadSHFL32]>;
-def UNSHFLW : ALUW_rr<0b0000100, 0b101, "unshflw">,
-              Sched<[WriteUNSHFL32, ReadUNSHFL32, ReadUNSHFL32]>;
-
-def XPERM_W : ALU_rr<0b0010100, 0b000, "xperm.w">,
-              Sched<[WriteXPERMW, ReadXPERMW, ReadXPERMW]>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
 // These instructions were named xperm.n and xperm.b in the last version of
 // the draft bit manipulation specification they were included in. However, we
 // use the mnemonics given to them in the ratified Zbkx extension.
-let Predicates = [HasStdExtZbpOrZbkx] in {
+let Predicates = [HasStdExtZbkx] in {
 def XPERM4 : ALU_rr<0b0010100, 0b010, "xperm4">, Sched<[]>;
 def XPERM8 : ALU_rr<0b0010100, 0b100, "xperm8">, Sched<[]>;
-} // Predicates = [HasStdExtZbpOrZbkx]
+} // Predicates = [HasStdExtZbkx]
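
xperm4/xperm8 treat rs1 as a small table of nibble or byte lanes and rs2 as a
vector of lane indices; indices beyond the register width produce zero. A
minimal model of xperm8 at XLEN=32, assuming the ratified Zbkx semantics
(xperm4 is identical with 4-bit lanes; the helper name is illustrative):

  #include <cstdint>

  static uint32_t Xperm8_32(uint32_t Rs1, uint32_t Rs2) {
    uint32_t Res = 0;
    for (unsigned I = 0; I < 32; I += 8) {
      uint32_t Idx = (Rs2 >> I) & 0xFF;            // lane index from rs2
      if (Idx < 4)                                 // only 4 byte lanes exist
        Res |= ((Rs1 >> (Idx * 8)) & 0xFF) << I;   // out-of-range -> 0
    }
    return Res;
  }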
 
 let Predicates = [HasStdExtZbb] in {
 def CLZ  : RVBUnary<0b0110000, 0b00000, 0b001, OPC_OP_IMM, "clz">,
@@ -528,25 +482,17 @@ def BCOMPRESSW   : ALUW_rr<0b0000100, 0b110, "bcompressw">,
                    Sched<[WriteCompress32, ReadCompress32, ReadCompress32]>;
 } // Predicates = [HasStdExtZbe, IsRV64]
 
-let Predicates = [HasStdExtZbpOrZbkb] in {
+let Predicates = [HasStdExtZbkb] in {
 def PACK  : ALU_rr<0b0000100, 0b100, "pack">,
             Sched<[WritePACK, ReadPACK, ReadPACK]>;
 def PACKH : ALU_rr<0b0000100, 0b111, "packh">,
             Sched<[WritePACK, ReadPACK, ReadPACK]>;
-} // Predicates = [HasStdExtZbpOrZbkb]
+} // Predicates = [HasStdExtZbkb]
 
-let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in 
+let Predicates = [HasStdExtZbkb, IsRV64] in
 def PACKW  : ALUW_rr<0b0000100, 0b100, "packw">,
              Sched<[WritePACK32, ReadPACK32, ReadPACK32]>;
 
-let Predicates = [HasStdExtZbp] in 
-def PACKU : ALU_rr<0b0100100, 0b100, "packu">,
-            Sched<[WritePACKU, ReadPACKU, ReadPACKU]>;
-
-let Predicates = [HasStdExtZbp, IsRV64] in 
-def PACKUW : ALUW_rr<0b0100100, 0b100, "packuw">,
-             Sched<[WritePACKU32, ReadPACKU32, ReadPACKU32]>;
-
 let Predicates = [HasStdExtZbm, IsRV64] in {
 def BMATFLIP : RVBUnary<0b0110000, 0b00011, 0b001, OPC_OP_IMM, "bmatflip">,
                Sched<[WriteBMatrix, ReadBMatrix]>;
@@ -565,48 +511,41 @@ let Predicates = [HasStdExtZbf, IsRV64] in
 def BFPW : ALUW_rr<0b0100100, 0b111, "bfpw">,
            Sched<[WriteBFP32, ReadBFP32, ReadBFP32]>;
 
-let Predicates = [HasStdExtZbbOrZbp, IsRV32] in {
+let Predicates = [HasStdExtZbb, IsRV32] in {
 def ZEXT_H_RV32 : RVBUnary<0b0000100, 0b00000, 0b100, OPC_OP, "zext.h">,
                   Sched<[WriteIALU, ReadIALU]>;
-} // Predicates = [HasStdExtZbbOrZbp, IsRV32]
+} // Predicates = [HasStdExtZbb, IsRV32]
 
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let Predicates = [HasStdExtZbb, IsRV64] in {
 def ZEXT_H_RV64 : RVBUnary<0b0000100, 0b00000, 0b100, OPC_OP_32, "zext.h">,
                   Sched<[WriteIALU, ReadIALU]>;
-} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
-
-// We treat rev8 and orc.b as standalone instructions even though they use a
-// portion of the encodings for grevi and gorci. This allows us to support only
-// those encodings when only Zbb is enabled. We do this even when grevi and
-// gorci are available with Zbp. Trying to use 'HasStdExtZbb, NotHasStdExtZbp'
-// causes diagnostics to suggest that Zbp rather than Zbb is required for rev8
-// or gorci. Since Zbb is closer to being finalized than Zbp this will be
-// misleading to users.
-let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV32] in {
+} // Predicates = [HasStdExtZbb, IsRV64]
+
+let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
 def REV8_RV32 : RVBUnary<0b0110100, 0b11000, 0b101, OPC_OP_IMM, "rev8">,
                 Sched<[WriteREV8, ReadREV8]>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV32]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV32]
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
 def REV8_RV64 : RVBUnary<0b0110101, 0b11000, 0b101, OPC_OP_IMM, "rev8">,
                 Sched<[WriteREV8, ReadREV8]>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbb] in {
 def ORC_B : RVBUnary<0b0010100, 0b00111, 0b101, OPC_OP_IMM, "orc.b">,
             Sched<[WriteORCB, ReadORCB]>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbb]
 
-let Predicates = [HasStdExtZbpOrZbkb] in 
+let Predicates = [HasStdExtZbkb] in
 def BREV8 : RVBUnary<0b0110100, 0b00111, 0b101, OPC_OP_IMM, "brev8">,
             Sched<[]>;
 
-let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in {
+let Predicates = [HasStdExtZbkb, IsRV32] in {
 def ZIP_RV32   : RVBUnary<0b0000100, 0b01111, 0b001, OPC_OP_IMM, "zip">,
                  Sched<[]>;
 def UNZIP_RV32 : RVBUnary<0b0000100, 0b01111, 0b101, OPC_OP_IMM, "unzip">,
                  Sched<[]>;
-} // Predicates = [HasStdExtZbpOrZbkb, IsRV32]
+} // Predicates = [HasStdExtZbkb, IsRV32]
 
 
 //===----------------------------------------------------------------------===//
@@ -617,146 +556,15 @@ let Predicates = [HasStdExtZba, IsRV64] in {
 def : InstAlias<"zext.w $rd, $rs", (ADD_UW GPR:$rd, GPR:$rs, X0)>;
 } // Predicates = [HasStdExtZba, IsRV64]
 
-let Predicates = [HasStdExtZbp] in {
-def : InstAlias<"rev.p $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b00001)>;
-def : InstAlias<"rev2.n $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00010)>;
-def : InstAlias<"rev.n $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b00011)>;
-def : InstAlias<"rev4.b $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00100)>;
-def : InstAlias<"rev2.b $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00110)>;
-def : InstAlias<"rev8.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01000)>;
-def : InstAlias<"rev4.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01100)>;
-def : InstAlias<"rev2.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01110)>;
-def : InstAlias<"rev.h $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b01111)>;
-def : InstAlias<"rev.b $rd, $rs",  (BREV8 GPR:$rd, GPR:$rs)>;
-
-def : InstAlias<"zip.n $rd, $rs",    (SHFLI   GPR:$rd, GPR:$rs, 0b0001)>;
-def : InstAlias<"unzip.n $rd, $rs",  (UNSHFLI GPR:$rd, GPR:$rs, 0b0001)>;
-def : InstAlias<"zip2.b $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b0010)>;
-def : InstAlias<"unzip2.b $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b0010)>;
-def : InstAlias<"zip.b $rd, $rs",    (SHFLI   GPR:$rd, GPR:$rs, 0b0011)>;
-def : InstAlias<"unzip.b $rd, $rs",  (UNSHFLI GPR:$rd, GPR:$rs, 0b0011)>;
-def : InstAlias<"zip4.h $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b0100)>;
-def : InstAlias<"unzip4.h $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b0100)>;
-def : InstAlias<"zip2.h $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b0110)>;
-def : InstAlias<"unzip2.h $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b0110)>;
-def : InstAlias<"zip.h $rd, $rs",    (SHFLI   GPR:$rd, GPR:$rs, 0b0111)>;
-def : InstAlias<"unzip.h $rd, $rs",  (UNSHFLI GPR:$rd, GPR:$rs, 0b0111)>;
-
-def : InstAlias<"orc.p $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b00001)>;
-def : InstAlias<"orc2.n $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b00010)>;
-def : InstAlias<"orc.n $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b00011)>;
-def : InstAlias<"orc4.b $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b00100)>;
-def : InstAlias<"orc2.b $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b00110)>;
-// orc.b is considered an instruction rather than an alias.
-def : InstAlias<"orc8.h $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b01000)>;
-def : InstAlias<"orc4.h $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b01100)>;
-def : InstAlias<"orc2.h $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b01110)>;
-def : InstAlias<"orc.h $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b01111)>;
-} // Predicates = [HasStdExtZbp]
-
-let Predicates = [HasStdExtZbp, IsRV32] in {
-def : InstAlias<"rev16 $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b10000)>;
-// rev8 is considered an instruction rather than an alias.
-def : InstAlias<"rev4 $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b11100)>;
-def : InstAlias<"rev2 $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b11110)>;
-def : InstAlias<"rev $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b11111)>;
-
-def : InstAlias<"zip8 $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b1000)>;
-def : InstAlias<"unzip8 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1000)>;
-def : InstAlias<"zip4 $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b1100)>;
-def : InstAlias<"unzip4 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1100)>;
-def : InstAlias<"zip2 $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b1110)>;
-def : InstAlias<"unzip2 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1110)>;
-// zip and unzip are considered instructions rather than aliases.
-
-def : InstAlias<"orc16 $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b10000)>;
-def : InstAlias<"orc8 $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b11000)>;
-def : InstAlias<"orc4 $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b11100)>;
-def : InstAlias<"orc2 $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b11110)>;
-def : InstAlias<"orc $rd, $rs",   (GORCI GPR:$rd, GPR:$rs, 0b11111)>;
-} // Predicates = [HasStdExtZbp, IsRV32]
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def : InstAlias<"rev16.w $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b010000)>;
-def : InstAlias<"rev8.w $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b011000)>;
-def : InstAlias<"rev4.w $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b011100)>;
-def : InstAlias<"rev2.w $rd, $rs",  (GREVI GPR:$rd, GPR:$rs, 0b011110)>;
-def : InstAlias<"rev.w $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b011111)>;
-def : InstAlias<"rev32 $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b100000)>;
-def : InstAlias<"rev16 $rd, $rs",   (GREVI GPR:$rd, GPR:$rs, 0b110000)>;
-// rev8 is considered an instruction rather than an alias.
-def : InstAlias<"rev4 $rd, $rs",    (GREVI GPR:$rd, GPR:$rs, 0b111100)>;
-def : InstAlias<"rev2 $rd, $rs",    (GREVI GPR:$rd, GPR:$rs, 0b111110)>;
-def : InstAlias<"rev $rd, $rs",     (GREVI GPR:$rd, GPR:$rs, 0b111111)>;
-
-def : InstAlias<"zip8.w $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b01000)>;
-def : InstAlias<"unzip8.w $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b01000)>;
-def : InstAlias<"zip4.w $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b01100)>;
-def : InstAlias<"unzip4.w $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b01100)>;
-def : InstAlias<"zip2.w $rd, $rs",   (SHFLI   GPR:$rd, GPR:$rs, 0b01110)>;
-def : InstAlias<"unzip2.w $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b01110)>;
-def : InstAlias<"zip.w $rd, $rs",    (SHFLI   GPR:$rd, GPR:$rs, 0b01111)>;
-def : InstAlias<"unzip.w $rd, $rs",  (UNSHFLI GPR:$rd, GPR:$rs, 0b01111)>;
-def : InstAlias<"zip16 $rd, $rs",    (SHFLI   GPR:$rd, GPR:$rs, 0b10000)>;
-def : InstAlias<"unzip16 $rd, $rs",  (UNSHFLI GPR:$rd, GPR:$rs, 0b10000)>;
-def : InstAlias<"zip8 $rd, $rs",     (SHFLI   GPR:$rd, GPR:$rs, 0b11000)>;
-def : InstAlias<"unzip8 $rd, $rs",   (UNSHFLI GPR:$rd, GPR:$rs, 0b11000)>;
-def : InstAlias<"zip4 $rd, $rs",     (SHFLI   GPR:$rd, GPR:$rs, 0b11100)>;
-def : InstAlias<"unzip4 $rd, $rs",   (UNSHFLI GPR:$rd, GPR:$rs, 0b11100)>;
-def : InstAlias<"zip2 $rd, $rs",     (SHFLI   GPR:$rd, GPR:$rs, 0b11110)>;
-def : InstAlias<"unzip2 $rd, $rs",   (UNSHFLI GPR:$rd, GPR:$rs, 0b11110)>;
-def : InstAlias<"zip $rd, $rs",      (SHFLI   GPR:$rd, GPR:$rs, 0b11111)>;
-def : InstAlias<"unzip $rd, $rs",    (UNSHFLI GPR:$rd, GPR:$rs, 0b11111)>;
-
-def : InstAlias<"orc16.w $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b010000)>;
-def : InstAlias<"orc8.w $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b011000)>;
-def : InstAlias<"orc4.w $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b011100)>;
-def : InstAlias<"orc2.w $rd, $rs",  (GORCI GPR:$rd, GPR:$rs, 0b011110)>;
-def : InstAlias<"orc.w $rd, $rs",   (GORCI GPR:$rd, GPR:$rs, 0b011111)>;
-def : InstAlias<"orc32 $rd, $rs",   (GORCI GPR:$rd, GPR:$rs, 0b100000)>;
-def : InstAlias<"orc16 $rd, $rs",   (GORCI GPR:$rd, GPR:$rs, 0b110000)>;
-def : InstAlias<"orc8 $rd, $rs",    (GORCI GPR:$rd, GPR:$rs, 0b111000)>;
-def : InstAlias<"orc4 $rd, $rs",    (GORCI GPR:$rd, GPR:$rs, 0b111100)>;
-def : InstAlias<"orc2 $rd, $rs",    (GORCI GPR:$rd, GPR:$rs, 0b111110)>;
-def : InstAlias<"orc $rd, $rs",     (GORCI GPR:$rd, GPR:$rs, 0b111111)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbb] in {
 def : InstAlias<"ror $rd, $rs1, $shamt",
                 (RORI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbb]
 
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let Predicates = [HasStdExtZbb, IsRV64] in {
 def : InstAlias<"rorw $rd, $rs1, $shamt",
                 (RORIW  GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
-} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
-
-let Predicates = [HasStdExtZbp] in {
-def : InstAlias<"grev $rd, $rs1, $shamt",
-                (GREVI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
-def : InstAlias<"gorc $rd, $rs1, $shamt",
-                (GORCI  GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
-def : InstAlias<"shfl $rd, $rs1, $shamt",
-                (SHFLI  GPR:$rd, GPR:$rs1, shfl_uimm:$shamt), 0>;
-def : InstAlias<"unshfl $rd, $rs1, $shamt",
-                (UNSHFLI  GPR:$rd, GPR:$rs1, shfl_uimm:$shamt), 0>;
-} // Predicates = [HasStdExtZbp]
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def : InstAlias<"grevw $rd, $rs1, $shamt",
-                (GREVIW  GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
-def : InstAlias<"gorcw $rd, $rs1, $shamt",
-                (GORCIW  GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
-// Zbp is unratified and would likely adopt the already ratified Zbkx names.
-// Thus the current Zbp instructions are defined as aliases for Zbkx instructions.
-let Predicates = [HasStdExtZbp] in {
-  def : InstAlias<"xperm.b $rd, $rs1, $rs2",
-                  (XPERM8 GPR:$rd, GPR:$rs1, GPR:$rs2)>;
-  def : InstAlias<"xperm.n $rd, $rs1, $rs2",
-                  (XPERM4 GPR:$rd, GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp]
+} // Predicates = [HasStdExtZbb, IsRV64]
 
 let Predicates = [HasStdExtZbs] in {
 def : InstAlias<"bset $rd, $rs1, $shamt",
@@ -773,13 +581,13 @@ def : InstAlias<"bext $rd, $rs1, $shamt",
 // Codegen patterns
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
+let Predicates = [HasStdExtZbbOrZbkb] in {
 def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
 def : Pat<(or  GPR:$rs1, (not GPR:$rs2)), (ORN  GPR:$rs1, GPR:$rs2)>;
 def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
+} // Predicates = [HasStdExtZbbOrZbkb]
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
+let Predicates = [HasStdExtZbbOrZbkb] in {
 def : PatGprGpr<shiftop<rotl>, ROL>;
 def : PatGprGpr<shiftop<rotr>, ROR>;
 
@@ -788,15 +596,15 @@ def : PatGprImm<rotr, RORI, uimmlog2xlen>;
 // implemented with rori by negating the immediate.
 def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
           (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
+} // Predicates = [HasStdExtZbbOrZbkb]
 
-let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
 def : PatGprGpr<shiftopw<riscv_rolw>, ROLW>;
 def : PatGprGpr<shiftopw<riscv_rorw>, RORW>;
 def : PatGprImm<riscv_rorw, RORIW, uimm5>;
 def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
           (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
-} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbs] in {
 def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
@@ -845,70 +653,23 @@ def : Pat<(and GPR:$r, BCLRIANDIMask:$i),
                  (BCLRITwoBitsMaskHigh BCLRIANDIMask:$i))>;
 } // Predicates = [HasStdExtZbs]
 
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbb] in {
 // We treat orc.b as a separate instruction, so match it directly. We also
 // lower the Zbb orc.b intrinsic to this.
 def : Pat<(riscv_gorc GPR:$rs1, 7), (ORC_B GPR:$rs1)>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbb]
 
-let Predicates = [HasStdExtZbpOrZbkb] in {
+let Predicates = [HasStdExtZbkb] in {
 // We treat brev8 as a separate instruction, so match it directly. We also
 // use this for brev8 when lowering bitreverse with Zbkb.
 def : Pat<(riscv_grev GPR:$rs1, 7), (BREV8 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbpOrZbkb]
+} // Predicates = [HasStdExtZbkb]
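
These two patterns are the control-7 special cases of grev/gorc: brev8
mirrors the bit order inside every byte, and orc.b splats any set bit across
its byte (a byte-wise "is nonzero"). A self-contained sketch (illustrative
helpers, not LLVM code):

  #include <cassert>
  #include <cstdint>

  // brev8 == grev with control 7: reverse the bit order inside every byte.
  static uint32_t Brev8_32(uint32_t X) {
    uint32_t R = 0;
    for (unsigned I = 0; I < 32; ++I)
      if (X & (1u << I))
        R |= 1u << ((I & ~7u) | (7 - (I & 7)));  // mirror bit within its byte
    return R;
  }
  // orc.b == gorc with control 7: each byte becomes 0xFF if it was nonzero.
  static uint32_t OrcB32(uint32_t X) {
    uint32_t R = 0;
    for (unsigned I = 0; I < 32; I += 8)
      if ((X >> I) & 0xFF)
        R |= 0xFFu << I;
    return R;
  }

  int main() {
    assert(Brev8_32(0x00000001) == 0x00000080);
    assert(OrcB32(0x00100000) == 0x00FF0000);
    return 0;
  }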
 
-let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in {
+let Predicates = [HasStdExtZbkb, IsRV32] in {
 // We treat zip and unzip as separate instructions, so match them directly.
 def : Pat<(i32 (riscv_shfl GPR:$rs1, 15)), (ZIP_RV32 GPR:$rs1)>;
 def : Pat<(i32 (riscv_unshfl GPR:$rs1, 15)), (UNZIP_RV32 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbpOrZbkb, IsRV32]
-
-let Predicates = [HasStdExtZbp] in {
-def : PatGprGpr<riscv_grev, GREV>;
-def : PatGprGpr<riscv_gorc, GORC>;
-def : PatGprImm<riscv_grev, GREVI, uimmlog2xlen>;
-def : PatGprImm<riscv_gorc, GORCI, uimmlog2xlen>;
-
-def : PatGprGpr<riscv_shfl, SHFL>;
-def : PatGprGpr<riscv_unshfl, UNSHFL>;
-def : PatGprImm<riscv_shfl, SHFLI, shfl_uimm>;
-def : PatGprImm<riscv_unshfl, UNSHFLI, shfl_uimm>;
-
-def : PatGprGpr<int_riscv_xperm_n, XPERM4>;
-def : PatGprGpr<int_riscv_xperm_b, XPERM8>;
-def : PatGprGpr<int_riscv_xperm_h, XPERM_H>;
-} // Predicates = [HasStdExtZbp]
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def : PatGprGpr<riscv_grevw, GREVW>;
-def : PatGprGpr<riscv_gorcw, GORCW>;
-
-// Select GREVIW/GORCIW when the immediate doesn't have bit 5 set and the result
-// is sign extended.
-// FIXME: Two special patterns are kept when Imm is 7.
-def : Pat<(i64 (sext_inreg (binop_oneuse<riscv_grev> GPR:$rs1, 7), i32)),
-          (GREVIW GPR:$rs1, 7)>;
-def : Pat<(i64 (sext_inreg (binop_oneuse<riscv_gorc> GPR:$rs1, 7), i32)),
-          (GORCIW GPR:$rs1, 7)>;
-def : PatGprImm<binop_allwusers<riscv_grev>, GREVIW, uimm5>;
-def : PatGprImm<binop_allwusers<riscv_gorc>, GORCIW, uimm5>;
-
-def : PatGprGpr<riscv_shflw, SHFLW>;
-def : PatGprGpr<riscv_unshflw, UNSHFLW>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
-let Predicates = [HasStdExtZbp, IsRV64] in
-def : PatGprGpr<int_riscv_xperm_w, XPERM_W>;
-
-let Predicates = [HasStdExtZbp, IsRV32] in {
-// We treat rev8 as a separate instruction, so match it directly.
-def : Pat<(i32 (riscv_grev GPR:$rs1, 24)), (REV8_RV32 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbp, IsRV32]
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
-// We treat rev8 as a separate instruction, so match it directly.
-def : Pat<(i64 (riscv_grev GPR:$rs1, 56)), (REV8_RV64 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
+} // Predicates = [HasStdExtZbkb, IsRV32]
 
 let Predicates = [HasStdExtZbb] in {
 def : PatGpr<ctlz, CLZ>;
@@ -942,20 +703,20 @@ let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
 def : Pat<(i64 (bswap GPR:$rs1)), (REV8_RV64 GPR:$rs1)>;
 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
-let Predicates = [HasStdExtZbpOrZbkb] in {
+let Predicates = [HasStdExtZbkb] in {
 def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
               (and GPR:$rs1, 0x00FF)),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 def : Pat<(or (shl (and GPR:$rs2, 0x00FF), (XLenVT 8)),
               (and GPR:$rs1, 0x00FF)),
           (PACKH GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbpOrZbkb]
+} // Predicates = [HasStdExtZbkb]
 
-let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in
+let Predicates = [HasStdExtZbkb, IsRV32] in
 def : Pat<(i32 (or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
 
-let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in {
+let Predicates = [HasStdExtZbkb, IsRV64] in {
 def : Pat<(i64 (or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
 
@@ -966,24 +727,11 @@ def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
 def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
                    (and GPR:$rs1, 0x000000000000FFFF))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbpOrZbkb, IsRV64]
-
-let Predicates = [HasStdExtZbp, IsRV32] in
-def : Pat<(i32 (or (and GPR:$rs2, 0xFFFF0000), (srl GPR:$rs1, (i32 16)))),
-          (PACKU GPR:$rs1, GPR:$rs2)>;
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def : Pat<(i64 (or (and GPR:$rs2, 0xFFFFFFFF00000000), (srl GPR:$rs1, (i64 32)))),
-          (PACKU GPR:$rs1, GPR:$rs2)>;
-
-def : Pat<(i64 (or (and (assertsexti32 GPR:$rs2), 0xFFFFFFFFFFFF0000),
-                   (srl (and GPR:$rs1, 0xFFFFFFFF), (i64 16)))),
-          (PACKUW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
+} // Predicates = [HasStdExtZbkb, IsRV64]
 
-let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
+let Predicates = [HasStdExtZbb, IsRV32] in
 def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV32 GPR:$rs)>;
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+let Predicates = [HasStdExtZbb, IsRV64] in
 def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV64 GPR:$rs)>;
 
 let Predicates = [HasStdExtZba] in {

diff  --git a/llvm/lib/Target/RISCV/RISCVSchedRocket.td b/llvm/lib/Target/RISCV/RISCVSchedRocket.td
index 196a093ec4cc..f457adcc52bc 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedRocket.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedRocket.td
@@ -246,7 +246,7 @@ defm : UnsupportedSchedZbs;
 defm : UnsupportedSchedZbe;
 defm : UnsupportedSchedZbf;
 defm : UnsupportedSchedZbm;
-defm : UnsupportedSchedZbp;
+defm : UnsupportedSchedZbkb;
 defm : UnsupportedSchedZbr;
 defm : UnsupportedSchedZfh;
 }

diff  --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 86dabdfe7edb..4edfff73cef8 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -233,7 +233,7 @@ defm : UnsupportedSchedZbs;
 defm : UnsupportedSchedZbe;
 defm : UnsupportedSchedZbf;
 defm : UnsupportedSchedZbm;
-defm : UnsupportedSchedZbp;
+defm : UnsupportedSchedZbkb;
 defm : UnsupportedSchedZbr;
 defm : UnsupportedSchedZfh;
 }

diff  --git a/llvm/lib/Target/RISCV/RISCVScheduleB.td b/llvm/lib/Target/RISCV/RISCVScheduleB.td
index 8eb03e17a751..68aeb8a70e51 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleB.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleB.td
@@ -46,23 +46,7 @@ def WriteBFP32       : SchedWrite; // BFPW
 // Zbm extension
 def WriteBMatrix     : SchedWrite; // bmator/bmatxor/bmatflip
 
-// Zbp extension
-def WriteORC         : SchedWrite; // gorc
-def WriteREV         : SchedWrite; // grev
-def WriteORC32       : SchedWrite; // gorcw
-def WriteREV32       : SchedWrite; // grevw
-def WriteREVImm      : SchedWrite; // grevi
-def WriteORCImm      : SchedWrite; // gorci
-def WriteREVImm32    : SchedWrite; // greviw
-def WriteORCImm32    : SchedWrite; // gorciw
-def WriteSHFL        : SchedWrite; // shfl
-def WriteUNSHFL      : SchedWrite; // unshfl
-def WriteSHFL32      : SchedWrite; // shflw
-def WriteUNSHFL32    : SchedWrite; // unshflw
-def WriteSHFLImm     : SchedWrite; // shfli
-def WriteUNSHFLImm   : SchedWrite; // unshfli
-def WriteXPERMH      : SchedWrite; // xperm.h
-def WriteXPERMW      : SchedWrite; // xperm.w
+// Zbkb extension
 def WritePACK        : SchedWrite; // pack/packh
 def WritePACK32      : SchedWrite; // packw
 def WritePACKU       : SchedWrite; // packu
@@ -118,23 +102,7 @@ def ReadBFP32       : SchedRead; // BFPW
 // Zbm extension
 def ReadBMatrix     : SchedRead; // bmator/bmatxor/bmatflip
 
-// Zbp extension
-def ReadORC         : SchedRead; // gorc
-def ReadREV         : SchedRead; // grev
-def ReadORC32       : SchedRead; // gorcw
-def ReadREV32       : SchedRead; // grevw
-def ReadREVImm      : SchedRead; // grevi
-def ReadORCImm      : SchedRead; // gorci
-def ReadREVImm32    : SchedRead; // greviw
-def ReadORCImm32    : SchedRead; // gorciw
-def ReadSHFL        : SchedRead; // shfl
-def ReadUNSHFL      : SchedRead; // unshfl
-def ReadSHFL32      : SchedRead; // shflw
-def ReadUNSHFL32    : SchedRead; // unshflw
-def ReadSHFLImm     : SchedRead; // shfli
-def ReadUNSHFLImm   : SchedRead; // unshfli
-def ReadXPERMH      : SchedRead; // xperm.h
-def ReadXPERMW      : SchedRead; // xperm.w
+// Zbkb extension
 def ReadPACK        : SchedRead; // pack/packh
 def ReadPACK32      : SchedRead; // packw
 def ReadPACKU       : SchedRead; // packu
@@ -242,45 +210,13 @@ def : ReadAdvance<ReadBMatrix, 0>;
 }
 }
 
-multiclass UnsupportedSchedZbp {
+multiclass UnsupportedSchedZbkb {
 let Unsupported = true in {
-def : WriteRes<WriteORC, []>;
-def : WriteRes<WriteREV, []>;
-def : WriteRes<WriteORC32, []>;
-def : WriteRes<WriteREV32, []>;
-def : WriteRes<WriteREVImm, []>;
-def : WriteRes<WriteORCImm, []>;
-def : WriteRes<WriteREVImm32, []>;
-def : WriteRes<WriteORCImm32, []>;
-def : WriteRes<WriteSHFL, []>;
-def : WriteRes<WriteUNSHFL, []>;
-def : WriteRes<WriteSHFL32, []>;
-def : WriteRes<WriteUNSHFL32, []>;
-def : WriteRes<WriteSHFLImm, []>;
-def : WriteRes<WriteUNSHFLImm, []>;
-def : WriteRes<WriteXPERMH, []>;
-def : WriteRes<WriteXPERMW, []>;
 def : WriteRes<WritePACK, []>;
 def : WriteRes<WritePACK32, []>;
 def : WriteRes<WritePACKU, []>;
 def : WriteRes<WritePACKU32, []>;
 
-def : ReadAdvance<ReadORC, 0>;
-def : ReadAdvance<ReadREV, 0>;
-def : ReadAdvance<ReadORC32, 0>;
-def : ReadAdvance<ReadREV32, 0>;
-def : ReadAdvance<ReadREVImm, 0>;
-def : ReadAdvance<ReadORCImm, 0>;
-def : ReadAdvance<ReadREVImm32, 0>;
-def : ReadAdvance<ReadORCImm32, 0>;
-def : ReadAdvance<ReadSHFL, 0>;
-def : ReadAdvance<ReadUNSHFL, 0>;
-def : ReadAdvance<ReadSHFL32, 0>;
-def : ReadAdvance<ReadUNSHFL32, 0>;
-def : ReadAdvance<ReadSHFLImm, 0>;
-def : ReadAdvance<ReadUNSHFLImm, 0>;
-def : ReadAdvance<ReadXPERMH, 0>;
-def : ReadAdvance<ReadXPERMW, 0>;
 def : ReadAdvance<ReadPACK, 0>;
 def : ReadAdvance<ReadPACK32, 0>;
 def : ReadAdvance<ReadPACKU, 0>;

diff  --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 74a601a5bb5b..6881ad8daeb9 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -57,7 +57,6 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool HasStdExtZbe = false;
   bool HasStdExtZbf = false;
   bool HasStdExtZbm = false;
-  bool HasStdExtZbp = false;
   bool HasStdExtZbr = false;
   bool HasStdExtZbs = false;
   bool HasStdExtZca = false;
@@ -167,7 +166,6 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool hasStdExtZbe() const { return HasStdExtZbe; }
   bool hasStdExtZbf() const { return HasStdExtZbf; }
   bool hasStdExtZbm() const { return HasStdExtZbm; }
-  bool hasStdExtZbp() const { return HasStdExtZbp; }
   bool hasStdExtZbr() const { return HasStdExtZbr; }
   bool hasStdExtZbs() const { return HasStdExtZbs; }
   bool hasStdExtZca() const { return HasStdExtZca; }

diff  --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 7ca0b46568e5..fa23d3d35a67 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -17,7 +17,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbe %s -o - | FileCheck --check-prefix=RV32ZBE %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbf %s -o - | FileCheck --check-prefix=RV32ZBF %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbm %s -o - | FileCheck --check-prefix=RV32ZBM %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp %s -o - | FileCheck --check-prefix=RV32ZBP %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbr %s -o - | FileCheck --check-prefix=RV32ZBR %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zbs %s -o - | FileCheck --check-prefix=RV32ZBS %s
 ; RUN: llc -mtriple=riscv32 -mattr=+v %s -o - | FileCheck --check-prefix=RV32V %s
@@ -58,7 +57,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbe %s -o - | FileCheck --check-prefix=RV64ZBE %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbf %s -o - | FileCheck --check-prefix=RV64ZBF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbm %s -o - | FileCheck --check-prefix=RV64ZBM %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp %s -o - | FileCheck --check-prefix=RV64ZBP %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbr %s -o - | FileCheck --check-prefix=RV64ZBR %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zbs %s -o - | FileCheck --check-prefix=RV64ZBS %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v %s -o - | FileCheck --check-prefix=RV64V %s
@@ -102,7 +100,6 @@
 ; RV32ZBE: .attribute 5, "rv32i2p0_zbe0p93"
 ; RV32ZBF: .attribute 5, "rv32i2p0_zbf0p93"
 ; RV32ZBM: .attribute 5, "rv32i2p0_zbm0p93"
-; RV32ZBP: .attribute 5, "rv32i2p0_zbp0p93"
 ; RV32ZBR: .attribute 5, "rv32i2p0_zbr0p93"
 ; RV32ZBS: .attribute 5, "rv32i2p0_zbs1p0"
 ; RV32V: .attribute 5, "rv32i2p0_f2p0_d2p0_v1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"
@@ -144,7 +141,6 @@
 ; RV64ZBE: .attribute 5, "rv64i2p0_zbe0p93"
 ; RV64ZBF: .attribute 5, "rv64i2p0_zbf0p93"
 ; RV64ZBM: .attribute 5, "rv64i2p0_zbm0p93"
-; RV64ZBP: .attribute 5, "rv64i2p0_zbp0p93"
 ; RV64ZBR: .attribute 5, "rv64i2p0_zbr0p93"
 ; RV64ZBS: .attribute 5, "rv64i2p0_zbs1p0"
 ; RV64V: .attribute 5, "rv64i2p0_f2p0_d2p0_v1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"

diff  --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
index a7cd36300375..0c9597a7f863 100644
--- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
@@ -11,10 +11,6 @@
 ; RUN:   | FileCheck %s -check-prefixes=RV32ZB,RV32ZBKB
 ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=RV64ZB,RV64ZBKB
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=RV32ZBP
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=RV64ZBP
 
 declare i16 @llvm.bswap.i16(i16)
 declare i32 @llvm.bswap.i32(i32)
@@ -52,16 +48,6 @@ define i16 @test_bswap_i16(i16 %a) nounwind {
 ; RV64ZB-NEXT:    rev8 a0, a0
 ; RV64ZB-NEXT:    srli a0, a0, 48
 ; RV64ZB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bswap_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8.h a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bswap_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.h a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i16 @llvm.bswap.i16(i16 %a)
   ret i16 %tmp
 }
@@ -109,16 +95,6 @@ define i32 @test_bswap_i32(i32 %a) nounwind {
 ; RV64ZB-NEXT:    rev8 a0, a0
 ; RV64ZB-NEXT:    srli a0, a0, 32
 ; RV64ZB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bswap_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8 a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bswap_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.w a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i32 @llvm.bswap.i32(i32 %a)
   ret i32 %tmp
 }
@@ -193,18 +169,6 @@ define i64 @test_bswap_i64(i64 %a) nounwind {
 ; RV64ZB:       # %bb.0:
 ; RV64ZB-NEXT:    rev8 a0, a0
 ; RV64ZB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bswap_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8 a2, a1
-; RV32ZBP-NEXT:    rev8 a1, a0
-; RV32ZBP-NEXT:    mv a0, a2
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bswap_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8 a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i64 @llvm.bswap.i64(i64 %a)
   ret i64 %tmp
 }
@@ -299,16 +263,6 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    srli a0, a0, 56
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_i8:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_i8:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i8 @llvm.bitreverse.i8(i8 %a)
   ret i8 %tmp
 }
@@ -437,16 +391,6 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    srli a0, a0, 48
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.h a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.h a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
   ret i16 %tmp
 }
@@ -593,16 +537,6 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    srli a0, a0, 32
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.w a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
   ret i32 %tmp
 }
@@ -806,18 +740,6 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; RV64ZBKB-NEXT:    rev8 a0, a0
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev a2, a1
-; RV32ZBP-NEXT:    rev a1, a0
-; RV32ZBP-NEXT:    mv a0, a2
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
   ret i64 %tmp
 }
@@ -932,16 +854,6 @@ define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind {
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bswap_bitreverse_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bswap_bitreverse_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i16 @llvm.bswap.i16(i16 %a)
   %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp)
   ret i16 %tmp2
@@ -1057,16 +969,6 @@ define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind {
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bswap_bitreverse_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bswap_bitreverse_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i32 @llvm.bswap.i32(i32 %a)
   %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp)
   ret i32 %tmp2
@@ -1213,17 +1115,6 @@ define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind {
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bswap_bitreverse_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    rev.b a1, a1
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bswap_bitreverse_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i64 @llvm.bswap.i64(i64 %a)
   %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp)
   ret i64 %tmp2
@@ -1339,16 +1230,6 @@ define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind {
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_bswap_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_bswap_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
   %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp)
   ret i16 %tmp2
@@ -1464,16 +1345,6 @@ define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind {
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_bswap_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_bswap_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
   %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp)
   ret i32 %tmp2
@@ -1620,17 +1491,6 @@ define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind {
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    brev8 a0, a0
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: test_bitreverse_bswap_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    rev.b a1, a1
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: test_bitreverse_bswap_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
   %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp)
   ret i64 %tmp2
@@ -1688,24 +1548,6 @@ define i32 @pr55484(i32 %0) {
 ; RV64ZBKB-NEXT:    slli a0, a0, 48
 ; RV64ZBKB-NEXT:    srai a0, a0, 48
 ; RV64ZBKB-NEXT:    ret
-;
-; RV32ZBP-LABEL: pr55484:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srli a1, a0, 8
-; RV32ZBP-NEXT:    slli a0, a0, 8
-; RV32ZBP-NEXT:    or a0, a1, a0
-; RV32ZBP-NEXT:    slli a0, a0, 16
-; RV32ZBP-NEXT:    srai a0, a0, 16
-; RV32ZBP-NEXT:    ret
-;
-; RV64ZBP-LABEL: pr55484:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    srli a1, a0, 8
-; RV64ZBP-NEXT:    slli a0, a0, 8
-; RV64ZBP-NEXT:    or a0, a1, a0
-; RV64ZBP-NEXT:    slli a0, a0, 48
-; RV64ZBP-NEXT:    srai a0, a0, 48
-; RV64ZBP-NEXT:    ret
   %2 = lshr i32 %0, 8
   %3 = shl i32 %0, 8
   %4 = or i32 %2, %3

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
similarity index 72%
rename from llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll
rename to llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index a6e504f700bc..06c975f06451 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbp-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -2,11 +2,9 @@
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32I
 ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBP-ZBKB
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBP-ZBKB
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB
 ; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBP-ZBKB
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBB-ZBKB
 
 define i32 @andn_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: andn_i32:
@@ -15,10 +13,10 @@ define i32 @andn_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: andn_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: andn_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %and = and i32 %neg, %a
   ret i32 %and
@@ -33,11 +31,11 @@ define i64 @andn_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    and a1, a3, a1
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: andn_i64:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a2
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a1, a1, a3
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: andn_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a2
+; RV32ZBB-ZBKB-NEXT:    andn a1, a1, a3
+; RV32ZBB-ZBKB-NEXT:    ret
   %neg = xor i64 %b, -1
   %and = and i64 %neg, %a
   ret i64 %and
@@ -50,10 +48,10 @@ define i32 @orn_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: orn_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: orn_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %or = or i32 %neg, %a
   ret i32 %or
@@ -68,11 +66,11 @@ define i64 @orn_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    or a1, a3, a1
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: orn_i64:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a2
-; RV32ZBB-ZBP-ZBKB-NEXT:    orn a1, a1, a3
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: orn_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a2
+; RV32ZBB-ZBKB-NEXT:    orn a1, a1, a3
+; RV32ZBB-ZBKB-NEXT:    ret
   %neg = xor i64 %b, -1
   %or = or i64 %neg, %a
   ret i64 %or
@@ -85,10 +83,10 @@ define i32 @xnor_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: xnor_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: xnor_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %a, -1
   %xor = xor i32 %neg, %b
   ret i32 %xor
@@ -103,11 +101,11 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: xnor_i64:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a2
-; RV32ZBB-ZBP-ZBKB-NEXT:    xnor a1, a1, a3
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: xnor_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a2
+; RV32ZBB-ZBKB-NEXT:    xnor a1, a1, a3
+; RV32ZBB-ZBKB-NEXT:    ret
   %neg = xor i64 %a, -1
   %xor = xor i64 %neg, %b
   ret i64 %xor
@@ -124,10 +122,10 @@ define i32 @rol_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: rol_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    rol a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: rol_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    rol a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
   ret i32 %or
 }
@@ -177,10 +175,10 @@ define i32 @ror_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: ror_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    ror a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: ror_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    ror a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
   ret i32 %or
 }
@@ -225,10 +223,10 @@ define i32 @rori_i32_fshl(i32 %a) nounwind {
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: rori_i32_fshl:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: rori_i32_fshl:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    rori a0, a0, 1
+; RV32ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
   ret i32 %1
 }
@@ -241,10 +239,10 @@ define i32 @rori_i32_fshr(i32 %a) nounwind {
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: rori_i32_fshr:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 31
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: rori_i32_fshr:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    rori a0, a0, 31
+; RV32ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
   ret i32 %1
 }
@@ -287,11 +285,11 @@ define i32 @not_shl_one_i32(i32 %x) {
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: not_shl_one_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    li a1, -2
-; RV32ZBB-ZBP-ZBKB-NEXT:    rol a0, a1, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: not_shl_one_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    li a1, -2
+; RV32ZBB-ZBKB-NEXT:    rol a0, a1, a0
+; RV32ZBB-ZBKB-NEXT:    ret
   %1 = shl i32 1, %x
   %2 = xor i32 %1, -1
   ret i32 %2
@@ -314,19 +312,19 @@ define i64 @not_shl_one_i64(i64 %x) {
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: not_shl_one_i64:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    addi a3, a0, -32
-; RV32ZBB-ZBP-ZBKB-NEXT:    li a2, -2
-; RV32ZBB-ZBP-ZBKB-NEXT:    li a1, -1
-; RV32ZBB-ZBP-ZBKB-NEXT:    bltz a3, .LBB15_2
-; RV32ZBB-ZBP-ZBKB-NEXT:  # %bb.1:
-; RV32ZBB-ZBP-ZBKB-NEXT:    rol a1, a2, a3
-; RV32ZBB-ZBP-ZBKB-NEXT:    li a0, -1
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
-; RV32ZBB-ZBP-ZBKB-NEXT:  .LBB15_2:
-; RV32ZBB-ZBP-ZBKB-NEXT:    rol a0, a2, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: not_shl_one_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    addi a3, a0, -32
+; RV32ZBB-ZBKB-NEXT:    li a2, -2
+; RV32ZBB-ZBKB-NEXT:    li a1, -1
+; RV32ZBB-ZBKB-NEXT:    bltz a3, .LBB15_2
+; RV32ZBB-ZBKB-NEXT:  # %bb.1:
+; RV32ZBB-ZBKB-NEXT:    rol a1, a2, a3
+; RV32ZBB-ZBKB-NEXT:    li a0, -1
+; RV32ZBB-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-NEXT:  .LBB15_2:
+; RV32ZBB-ZBKB-NEXT:    rol a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    ret
   %1 = shl i64 1, %x
   %2 = xor i64 %1, -1
   ret i64 %2
@@ -386,11 +384,11 @@ define i1 @andn_seqz_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: andn_seqz_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: andn_seqz_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andn a0, a1, a0
+; RV32ZBB-ZBKB-NEXT:    seqz a0, a0
+; RV32ZBB-ZBKB-NEXT:    ret
   %and = and i32 %a, %b
   %cmpeq = icmp eq i32 %and, %b
   ret i1 %cmpeq
@@ -407,13 +405,13 @@ define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: andn_seqz_i64:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a1, a3, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a2, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    or a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: andn_seqz_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andn a1, a3, a1
+; RV32ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    or a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    seqz a0, a0
+; RV32ZBB-ZBKB-NEXT:    ret
   %and = and i64 %a, %b
   %cmpeq = icmp eq i64 %and, %b
   ret i1 %cmpeq
@@ -427,11 +425,11 @@ define i1 @andn_snez_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: andn_snez_i32:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: andn_snez_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andn a0, a1, a0
+; RV32ZBB-ZBKB-NEXT:    snez a0, a0
+; RV32ZBB-ZBKB-NEXT:    ret
   %and = and i32 %a, %b
   %cmpeq = icmp ne i32 %and, %b
   ret i1 %cmpeq
@@ -448,13 +446,13 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBB-ZBP-ZBKB-LABEL: andn_snez_i64:
-; RV32ZBB-ZBP-ZBKB:       # %bb.0:
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a1, a3, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    andn a0, a2, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    or a0, a0, a1
-; RV32ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
-; RV32ZBB-ZBP-ZBKB-NEXT:    ret
+; RV32ZBB-ZBKB-LABEL: andn_snez_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andn a1, a3, a1
+; RV32ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    or a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    snez a0, a0
+; RV32ZBB-ZBKB-NEXT:    ret
   %and = and i64 %a, %b
   %cmpeq = icmp ne i64 %and, %b
   ret i1 %cmpeq

diff --git a/llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
similarity index 77%
rename from llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll
rename to llvm/test/CodeGen/RISCV/rv32zbkb.ll
index e3e9dd6dd177..09ffcc9c83f7 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbp-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -1,10 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32I
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBP-ZBKB
 ; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBP-ZBKB
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBKB
 
 define i32 @pack_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: pack_i32:
@@ -15,10 +13,10 @@ define i32 @pack_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBP-ZBKB-LABEL: pack_i32:
-; RV32ZBP-ZBKB:       # %bb.0:
-; RV32ZBP-ZBKB-NEXT:    pack a0, a0, a1
-; RV32ZBP-ZBKB-NEXT:    ret
+; RV32ZBKB-LABEL: pack_i32:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    pack a0, a0, a1
+; RV32ZBKB-NEXT:    ret
   %shl = and i32 %a, 65535
   %shl1 = shl i32 %b, 16
   %or = or i32 %shl1, %shl
@@ -67,10 +65,10 @@ define i32 @packh_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBP-ZBKB-LABEL: packh_i32:
-; RV32ZBP-ZBKB:       # %bb.0:
-; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a1
-; RV32ZBP-ZBKB-NEXT:    ret
+; RV32ZBKB-LABEL: packh_i32:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    packh a0, a0, a1
+; RV32ZBKB-NEXT:    ret
   %and = and i32 %a, 255
   %and1 = shl i32 %b, 8
   %shl = and i32 %and1, 65280
@@ -87,10 +85,10 @@ define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBP-ZBKB-LABEL: packh_i32_2:
-; RV32ZBP-ZBKB:       # %bb.0:
-; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a1
-; RV32ZBP-ZBKB-NEXT:    ret
+; RV32ZBKB-LABEL: packh_i32_2:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    packh a0, a0, a1
+; RV32ZBKB-NEXT:    ret
   %and = and i32 %a, 255
   %and1 = and i32 %b, 255
   %shl = shl i32 %and1, 8
@@ -108,11 +106,11 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBP-ZBKB-LABEL: packh_i64:
-; RV32ZBP-ZBKB:       # %bb.0:
-; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a2
-; RV32ZBP-ZBKB-NEXT:    li a1, 0
-; RV32ZBP-ZBKB-NEXT:    ret
+; RV32ZBKB-LABEL: packh_i64:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    packh a0, a0, a2
+; RV32ZBKB-NEXT:    li a1, 0
+; RV32ZBKB-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = shl i64 %b, 8
   %shl = and i64 %and1, 65280
@@ -130,11 +128,11 @@ define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
-; RV32ZBP-ZBKB-LABEL: packh_i64_2:
-; RV32ZBP-ZBKB:       # %bb.0:
-; RV32ZBP-ZBKB-NEXT:    packh a0, a0, a2
-; RV32ZBP-ZBKB-NEXT:    li a1, 0
-; RV32ZBP-ZBKB-NEXT:    ret
+; RV32ZBKB-LABEL: packh_i64_2:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    packh a0, a0, a2
+; RV32ZBKB-NEXT:    li a1, 0
+; RV32ZBKB-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = and i64 %b, 255
   %shl = shl i64 %and1, 8

diff --git a/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
deleted file mode 100644
index ad627b982f35..000000000000
--- a/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
+++ /dev/null
@@ -1,435 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32ZBP
-
-declare i32 @llvm.riscv.grev.i32(i32 %a, i32 %b)
-
-define i32 @grev32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: grev32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    grev a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define i32 @grev32_demandedbits(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: grev32_demandedbits:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    grev a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %c = and i32 %b, 31
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define i32 @grevi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: grevi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    grevi a0, a0, 13
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define i32 @revi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: revi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 31)
-  ret i32 %tmp
-}
-
-define i32 @rev2i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: rev2i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 30)
-  ret i32 %tmp
-}
-
-define i32 @rev4i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: rev4i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev4 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 28)
-  ret i32 %tmp
-}
-
-define i32 @rev8i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: rev8i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 24)
-  ret i32 %tmp
-}
-
-define i32 @rev16i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: rev16i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev16 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 16)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.gorc.i32(i32 %a, i32 %b)
-
-define i32 @gorc32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: gorc32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    gorc a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define i32 @gorc32_demandedbits(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: gorc32_demandedbits:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    gorc a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %c = and i32 %b, 31
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define i32 @gorci32(i32 %a) nounwind {
-; RV32ZBP-LABEL: gorci32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    gorci a0, a0, 13
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define i32 @orchi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: orchi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 15)
-  ret i32 %tmp
-}
-
-define i32 @orc16i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: orc16i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc16 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 16)
-  ret i32 %tmp
-}
-
-define i32 @orc8i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: orc8i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc8 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 24)
-  ret i32 %tmp
-}
-
-define i32 @orc4i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: orc4i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc4 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 28)
-  ret i32 %tmp
-}
-
-define i32 @orc2i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: orc2i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 30)
-  ret i32 %tmp
-}
-
-define i32 @orci32(i32 %a) nounwind {
-; RV32ZBP-LABEL: orci32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 31)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b)
-
-define i32 @shfl32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: shfl32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    shfl a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define i32 @shfl32_demandedbits(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: shfl32_demandedbits:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    shfl a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %c = and i32 %b, 15
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 %c)
-  ret i32 %tmp
-}
-
-define i32 @zipni32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zipni32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 1)
-  ret i32 %tmp
-}
-
-define i32 @zip2bi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zip2bi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip2.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 2)
-  ret i32 %tmp
-}
-
-define i32 @zipbi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zipbi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 3)
-  ret i32 %tmp
-}
-
-define i32 @zip4hi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zip4hi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip4.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 4)
-  ret i32 %tmp
-}
-
-define i32 @zip2hi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zip2hi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip2.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 6)
-  ret i32 %tmp
-}
-
-define i32 @ziphi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: ziphi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 7)
-  ret i32 %tmp
-}
-
-define i32 @shfli32(i32 %a) nounwind {
-; RV32ZBP-LABEL: shfli32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    shfli a0, a0, 13
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define i32 @zip4i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zip4i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip4 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 12)
-  ret i32 %tmp
-}
-
-define i32 @zip2i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zip2i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip2 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 14)
-  ret i32 %tmp
-}
-
-define i32 @zipi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zipi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 15)
-  ret i32 %tmp
-}
-
-define i32 @zip8i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: zip8i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip8 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 8)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b)
-
-define i32 @unshfl32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: unshfl32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unshfl a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define i32 @unshfl32_demandedbits(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: unshfl32_demandedbits:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unshfl a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %c = and i32 %b, 15
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %c)
-  ret i32 %tmp
-}
-
-define i32 @unzipni32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzipni32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 1)
-  ret i32 %tmp
-}
-
-define i32 @unzip2bi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzip2bi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip2.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 2)
-  ret i32 %tmp
-}
-
-define i32 @unzipbi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzipbi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 3)
-  ret i32 %tmp
-}
-
-define i32 @unzip4hi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzip4hi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip4.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 4)
-  ret i32 %tmp
-}
-
-define i32 @unzip2hi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzip2hi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip2.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 6)
-  ret i32 %tmp
-}
-
-define i32 @unziphi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unziphi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 7)
-  ret i32 %tmp
-}
-
-define i32 @unshfli32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unshfli32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unshfli a0, a0, 13
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define i32 @unzip4i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzip4i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip4 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 12)
-  ret i32 %tmp
-}
-
-define i32 @unzip2i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzip2i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip2 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 14)
-  ret i32 %tmp
-}
-
-define i32 @unzipi32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzipi32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 15)
-  ret i32 %tmp
-}
-
-define i32 @unzip8i32(i32 %a) nounwind {
-; RV32ZBP-LABEL: unzip8i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    unzip8 a0, a0
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 8)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.xperm.n.i32(i32 %a, i32 %b)
-
-define i32 @xpermn32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: xpermn32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    xperm.n a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.xperm.n.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.xperm.b.i32(i32 %a, i32 %b)
-
-define i32 @xpermb32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: xpermb32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    xperm.b a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.xperm.b.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.xperm.h.i32(i32 %a, i32 %b)
-
-define i32 @xpermh32(i32 %a, i32 %b) nounwind {
-; RV32ZBP-LABEL: xpermh32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    xperm.h a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.xperm.h.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}

diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll
deleted file mode 100644
index e3f824c214ad..000000000000
--- a/llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ /dev/null
@@ -1,3375 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32I
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32ZBP
-
-define i32 @gorc1_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc1_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a3, 349525
-; RV32I-NEXT:    addi a3, a3, 1365
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc1_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.p a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 1
-  %shl = and i32 %and, -1431655766
-  %and1 = lshr i32 %a, 1
-  %shr = and i32 %and1, 1431655765
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc1_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc1_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 1
-; RV32I-NEXT:    srli a5, a0, 1
-; RV32I-NEXT:    lui a6, 349525
-; RV32I-NEXT:    addi a6, a6, 1365
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc1_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.p a0, a0
-; RV32ZBP-NEXT:    orc.p a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 1
-  %shl = and i64 %and, -6148914691236517206
-  %and1 = lshr i64 %a, 1
-  %shr = and i64 %and1, 6148914691236517205
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define i32 @gorc2_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc2_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a3, 209715
-; RV32I-NEXT:    addi a3, a3, 819
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc2_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 2
-  %shl = and i32 %and, -858993460
-  %and1 = lshr i32 %a, 2
-  %shr = and i32 %and1, 858993459
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc2_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc2_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 2
-; RV32I-NEXT:    srli a5, a0, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc2_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2.n a0, a0
-; RV32ZBP-NEXT:    orc2.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 2
-  %shl = and i64 %and, -3689348814741910324
-  %and1 = lshr i64 %a, 2
-  %shr = and i64 %and1, 3689348814741910323
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define i32 @gorc3_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc3_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a3, 349525
-; RV32I-NEXT:    addi a3, a3, 1365
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a3, 209715
-; RV32I-NEXT:    addi a3, a3, 819
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc3_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc3_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc3_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 1
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a0, 1
-; RV32I-NEXT:    srli a5, a1, 1
-; RV32I-NEXT:    lui a6, 349525
-; RV32I-NEXT:    addi a6, a6, 1365
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 2
-; RV32I-NEXT:    srli a5, a0, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc3_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.n a0, a0
-; RV32ZBP-NEXT:    orc.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define i32 @gorc4_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc4_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc4_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc4.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 4
-  %shl = and i32 %and, -252645136
-  %and1 = lshr i32 %a, 4
-  %shr = and i32 %and1, 252645135
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc4_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc4_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 4
-; RV32I-NEXT:    srli a5, a0, 4
-; RV32I-NEXT:    lui a6, 61681
-; RV32I-NEXT:    addi a6, a6, -241
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc4_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc4.b a0, a0
-; RV32ZBP-NEXT:    orc4.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 4
-  %shl = and i64 %and, -1085102592571150096
-  %and1 = lshr i64 %a, 4
-  %shr = and i64 %and1, 1085102592571150095
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define i32 @gorc5_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc5_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a3, 349525
-; RV32I-NEXT:    addi a3, a3, 1365
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc5_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    gorci a0, a0, 5
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1b, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc5_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc5_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 1
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a0, 1
-; RV32I-NEXT:    srli a5, a1, 1
-; RV32I-NEXT:    lui a6, 349525
-; RV32I-NEXT:    addi a6, a6, 1365
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 4
-; RV32I-NEXT:    srli a5, a0, 4
-; RV32I-NEXT:    lui a6, 61681
-; RV32I-NEXT:    addi a6, a6, -241
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc5_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    gorci a0, a0, 5
-; RV32ZBP-NEXT:    gorci a1, a1, 5
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1b, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define i32 @gorc6_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc6_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a3, 209715
-; RV32I-NEXT:    addi a3, a3, 819
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc6_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1b, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc6_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc6_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a0, 2
-; RV32I-NEXT:    srli a5, a1, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 4
-; RV32I-NEXT:    srli a5, a0, 4
-; RV32I-NEXT:    lui a6, 61681
-; RV32I-NEXT:    addi a6, a6, -241
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc6_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2.b a0, a0
-; RV32ZBP-NEXT:    orc2.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1b, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define i32 @gorc7_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc7_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a3, 349525
-; RV32I-NEXT:    addi a3, a3, 1365
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a3, 209715
-; RV32I-NEXT:    addi a3, a3, 819
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc7_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  %and3 = shl i32 %or2b, 4
-  %shl3 = and i32 %and3, -252645136
-  %and3b = lshr i32 %or2b, 4
-  %shr3 = and i32 %and3b, 252645135
-  %or3 = or i32 %shr3, %or2b
-  %or3b = or i32 %or3, %shl3
-  ret i32 %or3b
-}
-
-define i64 @gorc7_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc7_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 1
-; RV32I-NEXT:    srli a5, a0, 1
-; RV32I-NEXT:    lui a6, 349525
-; RV32I-NEXT:    addi a6, a6, 1365
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a0, 2
-; RV32I-NEXT:    srli a5, a1, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 4
-; RV32I-NEXT:    srli a5, a0, 4
-; RV32I-NEXT:    lui a6, 61681
-; RV32I-NEXT:    addi a6, a6, -241
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc7_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.b a0, a0
-; RV32ZBP-NEXT:    orc.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  %and3 = shl i64 %or2b, 4
-  %shl3 = and i64 %and3, -1085102592571150096
-  %and3b = lshr i64 %or2b, 4
-  %shr3 = and i64 %and3b, 1085102592571150095
-  %or3 = or i64 %shr3, %or2b
-  %or3b = or i64 %or3, %shl3
-  ret i64 %or3b
-}
-
-define i32 @gorc8_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc8_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 1044496
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    addi a3, a3, 255
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc8_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc8.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 8
-  %shl = and i32 %and, -16711936
-  %and1 = lshr i32 %a, 8
-  %shr = and i32 %and1, 16711935
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc8_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc8_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    slli a3, a1, 8
-; RV32I-NEXT:    lui a4, 1044496
-; RV32I-NEXT:    addi a4, a4, -256
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 8
-; RV32I-NEXT:    srli a5, a0, 8
-; RV32I-NEXT:    lui a6, 4080
-; RV32I-NEXT:    addi a6, a6, 255
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc8_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc8.h a0, a0
-; RV32ZBP-NEXT:    orc8.h a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 8
-  %shl = and i64 %and, -71777214294589696
-  %and1 = lshr i64 %a, 8
-  %shr = and i64 %and1, 71777214294589695
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define i32 @gorc12_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc12_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 1044496
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    addi a3, a3, 255
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc12_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc4.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 4
-  %shl1 = and i32 %and1, -252645136
-  %and1b = lshr i32 %a, 4
-  %shr1 = and i32 %and1b, 252645135
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 8
-  %shl2 = and i32 %and2, -16711936
-  %and2b = lshr i32 %or1b, 8
-  %shr2 = and i32 %and2b, 16711935
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc12_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc12_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 4
-; RV32I-NEXT:    slli a3, a0, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a0, 4
-; RV32I-NEXT:    srli a5, a1, 4
-; RV32I-NEXT:    lui a6, 61681
-; RV32I-NEXT:    addi a6, a6, -241
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    slli a3, a1, 8
-; RV32I-NEXT:    lui a4, 1044496
-; RV32I-NEXT:    addi a4, a4, -256
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 8
-; RV32I-NEXT:    srli a5, a0, 8
-; RV32I-NEXT:    lui a6, 4080
-; RV32I-NEXT:    addi a6, a6, 255
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc12_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc4.h a0, a0
-; RV32ZBP-NEXT:    orc4.h a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 4
-  %shl1 = and i64 %and1, -1085102592571150096
-  %and1b = lshr i64 %a, 4
-  %shr1 = and i64 %and1b, 1085102592571150095
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 8
-  %shl2 = and i64 %and2, -71777214294589696
-  %and2b = lshr i64 %or1b, 8
-  %shr2 = and i64 %and2b, 71777214294589695
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define i32 @gorc14_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc14_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a3, 209715
-; RV32I-NEXT:    addi a3, a3, 819
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 1044496
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    addi a3, a3, 255
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc14_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1b, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  %and3 = shl i32 %or2b, 8
-  %shl3 = and i32 %and3, -16711936
-  %and3b = lshr i32 %or2b, 8
-  %shr3 = and i32 %and3b, 16711935
-  %or3 = or i32 %shr3, %or2b
-  %or3b = or i32 %or3, %shl3
-  ret i32 %or3b
-}
-
-define i64 @gorc14_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc14_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 2
-; RV32I-NEXT:    srli a5, a0, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a2, a1, 4
-; RV32I-NEXT:    slli a3, a0, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a0, 4
-; RV32I-NEXT:    srli a5, a1, 4
-; RV32I-NEXT:    lui a6, 61681
-; RV32I-NEXT:    addi a6, a6, -241
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    slli a3, a1, 8
-; RV32I-NEXT:    lui a4, 1044496
-; RV32I-NEXT:    addi a4, a4, -256
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 8
-; RV32I-NEXT:    srli a5, a0, 8
-; RV32I-NEXT:    lui a6, 4080
-; RV32I-NEXT:    addi a6, a6, 255
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc14_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc2.h a0, a0
-; RV32ZBP-NEXT:    orc2.h a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1b, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  %and3 = shl i64 %or2b, 8
-  %shl3 = and i64 %and3, -71777214294589696
-  %and3b = lshr i64 %or2b, 8
-  %shr3 = and i64 %and3b, 71777214294589695
-  %or3 = or i64 %shr3, %or2b
-  %or3b = or i64 %or3, %shl3
-  ret i64 %or3b
-}
-
-define i32 @gorc16_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc16_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 16
-; RV32I-NEXT:    srli a2, a0, 16
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc16_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc16 a0, a0
-; RV32ZBP-NEXT:    ret
-  %shl = shl i32 %a, 16
-  %shr = lshr i32 %a, 16
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i32 @gorc16_rotl_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc16_rotl_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 16
-; RV32I-NEXT:    slli a2, a0, 16
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc16_rotl_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc16 a0, a0
-; RV32ZBP-NEXT:    ret
-  %rot = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
-  %or = or i32 %rot, %a
-  ret i32 %or
-}
-
-define i32 @gorc16_rotr_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc16_rotr_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 16
-; RV32I-NEXT:    srli a2, a0, 16
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc16_rotr_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc16 a0, a0
-; RV32ZBP-NEXT:    ret
-  %rot = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 16)
-  %or = or i32 %rot, %a
-  ret i32 %or
-}
-
-define i64 @gorc16_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc16_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 16
-; RV32I-NEXT:    slli a3, a0, 16
-; RV32I-NEXT:    srli a4, a0, 16
-; RV32I-NEXT:    srli a5, a1, 16
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc16_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc16 a0, a0
-; RV32ZBP-NEXT:    orc16 a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 16
-  %shl = and i64 %and, -281470681808896
-  %and1 = lshr i64 %a, 16
-  %shr = and i64 %and1, 281470681808895
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-; gorc2, gorc2 -> gorc2
-define i32 @gorc2b_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc2b_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a3, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    or a1, a0, a1
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    srli a2, a1, 2
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc2b_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srli a1, a0, 2
-; RV32ZBP-NEXT:    or a1, a1, a0
-; RV32ZBP-NEXT:    orc2.n a0, a0
-; RV32ZBP-NEXT:    slli a1, a1, 2
-; RV32ZBP-NEXT:    lui a2, 838861
-; RV32ZBP-NEXT:    addi a2, a2, -820
-; RV32ZBP-NEXT:    and a1, a1, a2
-; RV32ZBP-NEXT:    srli a2, a0, 2
-; RV32ZBP-NEXT:    lui a3, 209715
-; RV32ZBP-NEXT:    addi a3, a3, 819
-; RV32ZBP-NEXT:    and a2, a2, a3
-; RV32ZBP-NEXT:    or a0, a2, a0
-; RV32ZBP-NEXT:    or a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-; gorc2, gorc2 -> gorc2
-define i64 @gorc2b_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc2b_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a5, a1, 2
-; RV32I-NEXT:    srli a6, a0, 2
-; RV32I-NEXT:    lui a7, 209715
-; RV32I-NEXT:    addi a7, a7, 819
-; RV32I-NEXT:    and a6, a6, a7
-; RV32I-NEXT:    and a5, a5, a7
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a0, a6, a0
-; RV32I-NEXT:    or a2, a0, a2
-; RV32I-NEXT:    or a3, a1, a3
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    srli a4, a3, 2
-; RV32I-NEXT:    srli a5, a2, 2
-; RV32I-NEXT:    and a5, a5, a7
-; RV32I-NEXT:    and a4, a4, a7
-; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    or a2, a5, a2
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc2b_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srli a2, a0, 2
-; RV32ZBP-NEXT:    srli a3, a1, 2
-; RV32ZBP-NEXT:    or a3, a3, a1
-; RV32ZBP-NEXT:    or a2, a2, a0
-; RV32ZBP-NEXT:    orc2.n a0, a0
-; RV32ZBP-NEXT:    orc2.n a1, a1
-; RV32ZBP-NEXT:    slli a2, a2, 2
-; RV32ZBP-NEXT:    slli a3, a3, 2
-; RV32ZBP-NEXT:    lui a4, 838861
-; RV32ZBP-NEXT:    addi a4, a4, -820
-; RV32ZBP-NEXT:    and a3, a3, a4
-; RV32ZBP-NEXT:    and a2, a2, a4
-; RV32ZBP-NEXT:    srli a4, a1, 2
-; RV32ZBP-NEXT:    srli a5, a0, 2
-; RV32ZBP-NEXT:    lui a6, 209715
-; RV32ZBP-NEXT:    addi a6, a6, 819
-; RV32ZBP-NEXT:    and a5, a5, a6
-; RV32ZBP-NEXT:    and a4, a4, a6
-; RV32ZBP-NEXT:    or a1, a4, a1
-; RV32ZBP-NEXT:    or a0, a5, a0
-; RV32ZBP-NEXT:    or a0, a0, a2
-; RV32ZBP-NEXT:    or a1, a1, a3
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-; gorc1, gorc2, gorc1 -> gorc2
-define i32 @gorc3b_i32(i32 %a) nounwind {
-; RV32I-LABEL: gorc3b_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a3, a0, 1
-; RV32I-NEXT:    lui a4, 349525
-; RV32I-NEXT:    addi a4, a4, 1365
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a3, 838861
-; RV32I-NEXT:    addi a3, a3, -820
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    srli a3, a0, 2
-; RV32I-NEXT:    lui a5, 209715
-; RV32I-NEXT:    addi a5, a5, 819
-; RV32I-NEXT:    and a3, a3, a5
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc3b_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  %and3 = shl i32 %or2b, 1
-  %shl3 = and i32 %and3, -1431655766
-  %and3b = lshr i32 %or2b, 1
-  %shr3 = and i32 %and3b, 1431655765
-  %or3 = or i32 %shr3, %or2b
-  %or3b = or i32 %or3, %shl3
-  ret i32 %or3b
-}
-
-; gorc1, gorc2, gorc1 -> gorc3
-define i64 @gorc3b_i64(i64 %a) nounwind {
-; RV32I-LABEL: gorc3b_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a5, a1, 1
-; RV32I-NEXT:    srli a6, a0, 1
-; RV32I-NEXT:    lui a7, 349525
-; RV32I-NEXT:    addi a7, a7, 1365
-; RV32I-NEXT:    and a6, a6, a7
-; RV32I-NEXT:    and a5, a5, a7
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    or a0, a6, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a5, 838861
-; RV32I-NEXT:    addi a5, a5, -820
-; RV32I-NEXT:    and a3, a3, a5
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    srli a5, a0, 2
-; RV32I-NEXT:    srli a6, a1, 2
-; RV32I-NEXT:    lui t0, 209715
-; RV32I-NEXT:    addi t0, t0, 819
-; RV32I-NEXT:    and a6, a6, t0
-; RV32I-NEXT:    and a5, a5, t0
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a1, a6, a1
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a4, a1, 1
-; RV32I-NEXT:    srli a5, a0, 1
-; RV32I-NEXT:    and a5, a5, a7
-; RV32I-NEXT:    and a4, a4, a7
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: gorc3b_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    orc.n a0, a0
-; RV32ZBP-NEXT:    orc.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  %and3 = shl i64 %or2b, 1
-  %shl3 = and i64 %and3, -6148914691236517206
-  %and3b = lshr i64 %or2b, 1
-  %shr3 = and i64 %and3b, 6148914691236517205
-  %or3 = or i64 %shr3, %or2b
-  %or3b = or i64 %or3, %shl3
-  ret i64 %or3b
-}
-
-define i32 @grev1_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev1_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev1_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.p a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 1
-  %shl = and i32 %and, -1431655766
-  %and1 = lshr i32 %a, 1
-  %shr = and i32 %and1, 1431655765
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev1_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev1_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    lui a4, 349525
-; RV32I-NEXT:    addi a4, a4, 1365
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev1_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.p a0, a0
-; RV32ZBP-NEXT:    rev.p a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 1
-  %shl = and i64 %and, -6148914691236517206
-  %and1 = lshr i64 %a, 1
-  %shr = and i64 %and1, 6148914691236517205
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define i32 @grev2_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev2_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev2_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 2
-  %shl = and i32 %and, -858993460
-  %and1 = lshr i32 %a, 2
-  %shr = and i32 %and1, 858993459
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev2_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev2_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev2_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.n a0, a0
-; RV32ZBP-NEXT:    rev2.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 2
-  %shl = and i64 %and, -3689348814741910324
-  %and1 = lshr i64 %a, 2
-  %shr = and i64 %and1, 3689348814741910323
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define i32 @grev3_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev3_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev3_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev3_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev3_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 1
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a4, 349525
-; RV32I-NEXT:    addi a4, a4, 1365
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev3_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.n a0, a0
-; RV32ZBP-NEXT:    rev.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define i32 @grev4_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev4_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev4_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev4.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 4
-  %shl = and i32 %and, -252645136
-  %and1 = lshr i32 %a, 4
-  %shr = and i32 %and1, 252645135
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev4_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev4_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    srli a1, a1, 4
-; RV32I-NEXT:    lui a4, 61681
-; RV32I-NEXT:    addi a4, a4, -241
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev4_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev4.b a0, a0
-; RV32ZBP-NEXT:    rev4.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 4
-  %shl = and i64 %and, -1085102592571150096
-  %and1 = lshr i64 %a, 4
-  %shr = and i64 %and1, 1085102592571150095
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define i32 @grev5_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev5_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev5_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    grevi a0, a0, 5
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev5_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev5_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 1
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a4, 349525
-; RV32I-NEXT:    addi a4, a4, 1365
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    srli a1, a1, 4
-; RV32I-NEXT:    lui a4, 61681
-; RV32I-NEXT:    addi a4, a4, -241
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev5_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    grevi a0, a0, 5
-; RV32ZBP-NEXT:    grevi a1, a1, 5
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-
-  %and2 = shl i64 %or1, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define i32 @grev6_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev6_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev6_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev6_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev6_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    srli a1, a1, 4
-; RV32I-NEXT:    lui a4, 61681
-; RV32I-NEXT:    addi a4, a4, -241
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev6_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.b a0, a0
-; RV32ZBP-NEXT:    rev2.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define i32 @grev7_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev7_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev7_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 4
-  %shl3 = and i32 %and3, -252645136
-  %and3b = lshr i32 %or2, 4
-  %shr3 = and i32 %and3b, 252645135
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-define i64 @grev7_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev7_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    lui a4, 349525
-; RV32I-NEXT:    addi a4, a4, 1365
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    slli a3, a1, 4
-; RV32I-NEXT:    lui a4, 986895
-; RV32I-NEXT:    addi a4, a4, 240
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    srli a1, a1, 4
-; RV32I-NEXT:    lui a4, 61681
-; RV32I-NEXT:    addi a4, a4, -241
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev7_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    rev.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 4
-  %shl3 = and i64 %and3, -1085102592571150096
-  %and3b = lshr i64 %or2, 4
-  %shr3 = and i64 %and3b, 1085102592571150095
-  %or3 = or i64 %shl3, %shr3
-  ret i64 %or3
-}
-
-define i32 @grev8_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev8_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 1044496
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 8
-; RV32I-NEXT:    lui a2, 4080
-; RV32I-NEXT:    addi a2, a2, 255
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev8_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = shl i32 %a, 8
-  %shl = and i32 %and, -16711936
-  %and1 = lshr i32 %a, 8
-  %shr = and i32 %and1, 16711935
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i32 @grev12_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev12_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 1044496
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 8
-; RV32I-NEXT:    lui a2, 4080
-; RV32I-NEXT:    addi a2, a2, 255
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev12_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev4.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 4
-  %shl1 = and i32 %and1, -252645136
-  %and1b = lshr i32 %a, 4
-  %shr1 = and i32 %and1b, 252645135
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 8
-  %shl2 = and i32 %and2, -16711936
-  %and2b = lshr i32 %or1, 8
-  %shr2 = and i32 %and2b, 16711935
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i32 @grev14_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev14_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 4
-; RV32I-NEXT:    lui a2, 986895
-; RV32I-NEXT:    addi a2, a2, 240
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 1044496
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 8
-; RV32I-NEXT:    lui a2, 4080
-; RV32I-NEXT:    addi a2, a2, 255
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev14_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 8
-  %shl3 = and i32 %and3, -16711936
-  %and3b = lshr i32 %or2, 8
-  %shr3 = and i32 %and3b, 16711935
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-define i64 @grev8_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev8_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    slli a3, a1, 8
-; RV32I-NEXT:    lui a4, 1044496
-; RV32I-NEXT:    addi a4, a4, -256
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 8
-; RV32I-NEXT:    srli a1, a1, 8
-; RV32I-NEXT:    lui a4, 4080
-; RV32I-NEXT:    addi a4, a4, 255
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev8_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8.h a0, a0
-; RV32ZBP-NEXT:    rev8.h a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 8
-  %shl = and i64 %and, -71777214294589696
-  %and1 = lshr i64 %a, 8
-  %shr = and i64 %and1, 71777214294589695
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define i32 @grev16_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev16_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 16
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev16_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rori a0, a0, 16
-; RV32ZBP-NEXT:    ret
-  %shl = shl i32 %a, 16
-  %shr = lshr i32 %a, 16
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i32 @grev3b_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev3b_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a2, 838861
-; RV32I-NEXT:    addi a2, a2, -820
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev3b_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and2 = shl i32 %a, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %a, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and1 = shl i32 %or2, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %or2, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  ret i32 %or1
-}
-
-define i64 @grev3b_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev3b_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    lui a4, 349525
-; RV32I-NEXT:    addi a4, a4, 1365
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev3b_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.n a0, a0
-; RV32ZBP-NEXT:    rev.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and2 = shl i64 %a, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %a, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and1 = shl i64 %or2, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %or2, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  ret i64 %or1
-}
-
-; grev1, grev2, grev1 -> grev2
-define i32 @grev2b_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev2b_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a3, 349525
-; RV32I-NEXT:    addi a3, a3, 1365
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev2b_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 1
-  %shl3 = and i32 %and3, -1431655766
-  %and3b = lshr i32 %or2, 1
-  %shr3 = and i32 %and3b, 1431655765
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-; grev1, grev2, grev1 -> grev2
-define i64 @grev2b_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev2b_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    lui a5, 349525
-; RV32I-NEXT:    addi a5, a5, 1365
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    slli a2, a1, 2
-; RV32I-NEXT:    slli a3, a0, 2
-; RV32I-NEXT:    lui a6, 838861
-; RV32I-NEXT:    addi a6, a6, -820
-; RV32I-NEXT:    and a3, a3, a6
-; RV32I-NEXT:    and a2, a2, a6
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a0, a0, a6
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    slli a3, a1, 1
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev2b_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev2.n a0, a0
-; RV32ZBP-NEXT:    rev2.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 1
-  %shl3 = and i64 %and3, -6148914691236517206
-  %and3b = lshr i64 %or2, 1
-  %shr3 = and i64 %and3b, 6148914691236517205
-  %or3 = or i64 %shl3, %shr3
-  ret i64 %or3
-}
-
-; grev1, grev2, grev1, grev2 -> identity
-define i32 @grev0_i32(i32 %a) nounwind {
-; RV32I-LABEL: grev0_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    lui a2, 699051
-; RV32I-NEXT:    addi a2, a2, -1366
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a3, 349525
-; RV32I-NEXT:    addi a3, a3, 1365
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    lui a4, 838861
-; RV32I-NEXT:    addi a4, a4, -820
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a5, 209715
-; RV32I-NEXT:    addi a5, a5, 819
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    slli a1, a0, 2
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev0_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 1
-  %shl3 = and i32 %and3, -1431655766
-  %and3b = lshr i32 %or2, 1
-  %shr3 = and i32 %and3b, 1431655765
-  %or3 = or i32 %shl3, %shr3
-  %and4 = shl i32 %or3, 2
-  %shl4 = and i32 %and4, -858993460
-  %and4b = lshr i32 %or3, 2
-  %shr4 = and i32 %and4b, 858993459
-  %or4 = or i32 %shl4, %shr4
-  ret i32 %or4
-}
-
-; grev1, grev2, grev1, grev2 -> identity
-define i64 @grev0_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev0_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a2, a1, 1
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    lui a4, 699051
-; RV32I-NEXT:    addi a4, a4, -1366
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a5, 349525
-; RV32I-NEXT:    addi a5, a5, 1365
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    lui a6, 838861
-; RV32I-NEXT:    addi a6, a6, -820
-; RV32I-NEXT:    and a3, a3, a6
-; RV32I-NEXT:    and a2, a2, a6
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    lui a7, 209715
-; RV32I-NEXT:    addi a7, a7, 819
-; RV32I-NEXT:    and a1, a1, a7
-; RV32I-NEXT:    and a0, a0, a7
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    slli a2, a1, 1
-; RV32I-NEXT:    slli a3, a0, 1
-; RV32I-NEXT:    and a3, a3, a4
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    slli a3, a1, 2
-; RV32I-NEXT:    and a3, a3, a6
-; RV32I-NEXT:    and a2, a2, a6
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    and a1, a1, a7
-; RV32I-NEXT:    and a0, a0, a7
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev0_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 1
-  %shl3 = and i64 %and3, -6148914691236517206
-  %and3b = lshr i64 %or2, 1
-  %shr3 = and i64 %and3b, 6148914691236517205
-  %or3 = or i64 %shl3, %shr3
-  %and4 = shl i64 %or3, 2
-  %shl4 = and i64 %and4, -3689348814741910324
-  %and4b = lshr i64 %or3, 2
-  %shr4 = and i64 %and4b, 3689348814741910323
-  %or4 = or i64 %shl4, %shr4
-  ret i64 %or4
-}
-
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-declare i32 @llvm.fshr.i32(i32, i32, i32)
-
-define signext i32 @grev16_i32_fshl(i32 signext %a) nounwind {
-; RV32I-LABEL: grev16_i32_fshl:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 16
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev16_i32_fshl:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rori a0, a0, 16
-; RV32ZBP-NEXT:    ret
-  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
-  ret i32 %or
-}
-
-define signext i32 @grev16_i32_fshr(i32 signext %a) nounwind {
-; RV32I-LABEL: grev16_i32_fshr:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 16
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev16_i32_fshr:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rori a0, a0, 16
-; RV32ZBP-NEXT:    ret
-  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 16)
-  ret i32 %or
-}
-
-define i64 @grev16_i64(i64 %a) nounwind {
-; RV32I-LABEL: grev16_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a0, 16
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    srli a2, a1, 16
-; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: grev16_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rori a0, a0, 16
-; RV32ZBP-NEXT:    rori a1, a1, 16
-; RV32ZBP-NEXT:    ret
-  %and = shl i64 %a, 16
-  %shl = and i64 %and, -281470681808896
-  %and1 = lshr i64 %a, 16
-  %shr = and i64 %and1, 281470681808895
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-declare i16 @llvm.bswap.i16(i16)
-
-define zeroext i16 @bswap_i16(i16 zeroext %a) nounwind {
-; RV32I-LABEL: bswap_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    slli a0, a0, 8
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bswap_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i16 @llvm.bswap.i16(i16 %a)
-  ret i16 %1
-}
-
-declare i32 @llvm.bswap.i32(i32)
-
-define i32 @bswap_i32(i32 %a) nounwind {
-; RV32I-LABEL: bswap_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 24
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bswap_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8 a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
-  ret i32 %1
-}
-
-declare i64 @llvm.bswap.i64(i64)
-
-define i64 @bswap_i64(i64 %a) {
-; RV32I-LABEL: bswap_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a1, 8
-; RV32I-NEXT:    lui a3, 16
-; RV32I-NEXT:    addi a3, a3, -256
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    srli a4, a1, 24
-; RV32I-NEXT:    or a2, a2, a4
-; RV32I-NEXT:    slli a4, a1, 8
-; RV32I-NEXT:    lui a5, 4080
-; RV32I-NEXT:    and a4, a4, a5
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a4
-; RV32I-NEXT:    or a2, a1, a2
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    srli a3, a0, 24
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a3, a0, 8
-; RV32I-NEXT:    and a3, a3, a5
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a1, a0, a1
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bswap_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8 a2, a1
-; RV32ZBP-NEXT:    rev8 a1, a0
-; RV32ZBP-NEXT:    mv a0, a2
-; RV32ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bswap.i64(i64 %a)
-  ret i64 %1
-}
-
-declare i8 @llvm.bitreverse.i8(i8)
-
-define zeroext i8 @bitreverse_i8(i8 zeroext %a) nounwind {
-; RV32I-LABEL: bitreverse_i8:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    andi a0, a0, 15
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    andi a1, a0, 51
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    andi a0, a0, 51
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    andi a1, a0, 85
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    andi a0, a0, 85
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bitreverse_i8:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i8 @llvm.bitreverse.i8(i8 %a)
-  ret i8 %1
-}
-
-declare i16 @llvm.bitreverse.i16(i16)
-
-define zeroext i16 @bitreverse_i16(i16 zeroext %a) nounwind {
-; RV32I-LABEL: bitreverse_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    slli a0, a0, 8
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 1
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 3
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 5
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bitreverse_i16:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i16 @llvm.bitreverse.i16(i16 %a)
-  ret i16 %1
-}
-
-declare i32 @llvm.bitreverse.i32(i32)
-
-define i32 @bitreverse_i32(i32 %a) nounwind {
-; RV32I-LABEL: bitreverse_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 24
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bitreverse_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.bitreverse.i32(i32 %a)
-  ret i32 %1
-}
-
-declare i64 @llvm.bitreverse.i64(i64)
-
-define i64 @bitreverse_i64(i64 %a) nounwind {
-; RV32I-LABEL: bitreverse_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a1, 8
-; RV32I-NEXT:    lui a3, 16
-; RV32I-NEXT:    addi a3, a3, -256
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    srli a4, a1, 24
-; RV32I-NEXT:    or a2, a2, a4
-; RV32I-NEXT:    slli a4, a1, 8
-; RV32I-NEXT:    lui a5, 4080
-; RV32I-NEXT:    and a4, a4, a5
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a4
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    srli a2, a1, 4
-; RV32I-NEXT:    lui a4, 61681
-; RV32I-NEXT:    addi a4, a4, -241
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    slli a1, a1, 4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a2, a2, a6
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 1
-; RV32I-NEXT:    lui a7, 349525
-; RV32I-NEXT:    addi a7, a7, 1365
-; RV32I-NEXT:    and a2, a2, a7
-; RV32I-NEXT:    and a1, a1, a7
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a2, a2, a1
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    srli a3, a0, 24
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a3, a0, 8
-; RV32I-NEXT:    and a3, a3, a5
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    and a0, a0, a6
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, a7
-; RV32I-NEXT:    and a0, a0, a7
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a1, a1, a0
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bitreverse_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev a2, a1
-; RV32ZBP-NEXT:    rev a1, a0
-; RV32ZBP-NEXT:    mv a0, a2
-; RV32ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bitreverse.i64(i64 %a)
-  ret i64 %1
-}
-
-define i32 @bswap_rotr_i32(i32 %a) {
-; RV32I-LABEL: bswap_rotr_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    lui a2, 4080
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    slli a2, a0, 24
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a0, 24
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    slli a0, a0, 8
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a1, a1, 16
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bswap_rotr_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = call i32 @llvm.bswap.i32(i32 %a)
-  %2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16)
-  ret i32 %2
-}
-
-define i32 @bswap_rotl_i32(i32 %a) {
-; RV32I-LABEL: bswap_rotl_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 24
-; RV32I-NEXT:    srli a2, a0, 16
-; RV32I-NEXT:    slli a2, a2, 8
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    slli a1, a1, 16
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bswap_rotl_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev8.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = call i32 @llvm.bswap.i32(i32 %a)
-  %2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16)
-  ret i32 %2
-}
-
-define i32 @bitreverse_bswap_i32(i32 %a) {
-; RV32I-LABEL: bitreverse_bswap_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bitreverse_bswap_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %1 = call i32 @llvm.bitreverse.i32(i32 %a)
-  %2 = call i32 @llvm.bswap.i32(i32 %1)
-  ret i32 %2
-}
-
-define i64 @bitreverse_bswap_i64(i64 %a) {
-; RV32I-LABEL: bitreverse_bswap_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a5, 349525
-; RV32I-NEXT:    addi a5, a5, 1365
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a1, 4
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    slli a1, a1, 4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 2
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 1
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: bitreverse_bswap_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    rev.b a0, a0
-; RV32ZBP-NEXT:    rev.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bitreverse.i64(i64 %a)
-  %2 = call i64 @llvm.bswap.i64(i64 %1)
-  ret i64 %2
-}
-
-define i32 @shfl1_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: shfl1_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 629146
-; RV32I-NEXT:    addi a1, a1, -1639
-; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    slli a2, a0, 1
-; RV32I-NEXT:    lui a3, 279620
-; RV32I-NEXT:    addi a3, a3, 1092
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    lui a2, 139810
-; RV32I-NEXT:    addi a2, a2, 546
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl1_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip.n a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, -1717986919
-  %shl = shl i32 %a, 1
-  %and1 = and i32 %shl, 1145324612
-  %or = or i32 %and1, %and
-  %shr = lshr i32 %a, 1
-  %and2 = and i32 %shr, 572662306
-  %or3 = or i32 %or, %and2
-  ret i32 %or3
-}
-
-define i64 @shfl1_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: shfl1_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 629146
-; RV32I-NEXT:    addi a2, a2, -1639
-; RV32I-NEXT:    and a3, a0, a2
-; RV32I-NEXT:    and a2, a1, a2
-; RV32I-NEXT:    slli a4, a1, 1
-; RV32I-NEXT:    slli a5, a0, 1
-; RV32I-NEXT:    lui a6, 279620
-; RV32I-NEXT:    addi a6, a6, 1092
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a2, a2, a4
-; RV32I-NEXT:    or a3, a3, a5
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srli a1, a1, 1
-; RV32I-NEXT:    lui a4, 139810
-; RV32I-NEXT:    addi a4, a4, 546
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl1_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip.n a0, a0
-; RV32ZBP-NEXT:    zip.n a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, -7378697629483820647
-  %shl = shl i64 %a, 1
-  %and1 = and i64 %shl, 4919131752989213764
-  %or = or i64 %and, %and1
-  %shr = lshr i64 %a, 1
-  %and2 = and i64 %shr, 2459565876494606882
-  %or3 = or i64 %or, %and2
-  ret i64 %or3
-}
-
-define i32 @shfl2_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: shfl2_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 801852
-; RV32I-NEXT:    addi a1, a1, 963
-; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    slli a2, a0, 2
-; RV32I-NEXT:    lui a3, 197379
-; RV32I-NEXT:    addi a3, a3, 48
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    lui a2, 49345
-; RV32I-NEXT:    addi a2, a2, -1012
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl2_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip2.b a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, -1010580541
-  %shl = shl i32 %a, 2
-  %and1 = and i32 %shl, 808464432
-  %or = or i32 %and1, %and
-  %shr = lshr i32 %a, 2
-  %and2 = and i32 %shr, 202116108
-  %or3 = or i32 %and2, %or
-  ret i32 %or3
-}
-
-define i64 @shfl2_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: shfl2_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 801852
-; RV32I-NEXT:    addi a2, a2, 963
-; RV32I-NEXT:    and a3, a0, a2
-; RV32I-NEXT:    and a2, a1, a2
-; RV32I-NEXT:    slli a4, a1, 2
-; RV32I-NEXT:    slli a5, a0, 2
-; RV32I-NEXT:    lui a6, 197379
-; RV32I-NEXT:    addi a6, a6, 48
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    or a2, a2, a4
-; RV32I-NEXT:    or a3, a3, a5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    srli a1, a1, 2
-; RV32I-NEXT:    lui a4, 49345
-; RV32I-NEXT:    addi a4, a4, -1012
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl2_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip2.b a0, a0
-; RV32ZBP-NEXT:    zip2.b a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, -4340410370284600381
-  %shl = shl i64 %a, 2
-  %and1 = and i64 %shl, 3472328296227680304
-  %or = or i64 %and, %and1
-  %shr = lshr i64 %a, 2
-  %and2 = and i64 %shr, 868082074056920076
-  %or3 = or i64 %and2, %or
-  ret i64 %or3
-}
-
-define i32 @shfl4_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: shfl4_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 983295
-; RV32I-NEXT:    addi a1, a1, 15
-; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    slli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61441
-; RV32I-NEXT:    addi a3, a3, -256
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a3, 3840
-; RV32I-NEXT:    addi a3, a3, 240
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl4_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip4.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, -267390961
-  %shl = shl i32 %a, 4
-  %and1 = and i32 %shl, 251662080
-  %shr = lshr i32 %a, 4
-  %and2 = and i32 %shr, 15728880
-  %or = or i32 %and2, %and
-  %or3 = or i32 %or, %and1
-  ret i32 %or3
-}
-
-define i64 @shfl4_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: shfl4_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 983295
-; RV32I-NEXT:    addi a2, a2, 15
-; RV32I-NEXT:    and a3, a1, a2
-; RV32I-NEXT:    and a2, a0, a2
-; RV32I-NEXT:    slli a4, a1, 4
-; RV32I-NEXT:    slli a5, a0, 4
-; RV32I-NEXT:    lui a6, 61441
-; RV32I-NEXT:    addi a6, a6, -256
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    srli a1, a1, 4
-; RV32I-NEXT:    srli a0, a0, 4
-; RV32I-NEXT:    lui a6, 3840
-; RV32I-NEXT:    addi a6, a6, 240
-; RV32I-NEXT:    and a0, a0, a6
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a5, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl4_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip4.h a0, a0
-; RV32ZBP-NEXT:    zip4.h a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, -1148435428713435121
-  %shl = shl i64 %a, 4
-  %and1 = and i64 %shl, 1080880403494997760
-  %shr = lshr i64 %a, 4
-  %and2 = and i64 %shr, 67555025218437360
-  %or = or i64 %and1, %and2
-  %or3 = or i64 %or, %and
-  ret i64 %or3
-}
-
-define i32 @shfl8_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: shfl8_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1044480
-; RV32I-NEXT:    addi a1, a1, 255
-; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    srli a0, a0, 8
-; RV32I-NEXT:    lui a3, 16
-; RV32I-NEXT:    addi a3, a3, -256
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl8_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip8 a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, -16776961
-  %shl = shl i32 %a, 8
-  %and1 = and i32 %shl, 16711680
-  %shr = lshr i32 %a, 8
-  %and2 = and i32 %shr, 65280
-  %or = or i32 %and, %and2
-  %or3 = or i32 %or, %and1
-  ret i32 %or3
-}
-
-define i64 @shfl8_i64(i64 %a, i64 %b) nounwind {
-; RV32I-LABEL: shfl8_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 1044480
-; RV32I-NEXT:    addi a2, a2, 255
-; RV32I-NEXT:    and a3, a0, a2
-; RV32I-NEXT:    and a2, a1, a2
-; RV32I-NEXT:    slli a4, a0, 8
-; RV32I-NEXT:    slli a5, a1, 8
-; RV32I-NEXT:    lui a6, 4080
-; RV32I-NEXT:    and a5, a5, a6
-; RV32I-NEXT:    and a4, a4, a6
-; RV32I-NEXT:    srli a1, a1, 8
-; RV32I-NEXT:    srli a0, a0, 8
-; RV32I-NEXT:    lui a6, 16
-; RV32I-NEXT:    addi a6, a6, -256
-; RV32I-NEXT:    and a0, a0, a6
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a0, a4, a0
-; RV32I-NEXT:    or a1, a5, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: shfl8_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zip8 a0, a0
-; RV32ZBP-NEXT:    zip8 a1, a1
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, -72056494543077121
-  %shl = shl i64 %a, 8
-  %and1 = and i64 %shl, 71776119077928960
-  %shr = lshr i64 %a, 8
-  %and2 = and i64 %shr, 280375465148160
-  %or = or i64 %and2, %and
-  %or3 = or i64 %and1, %or
-  ret i64 %or3
-}
-
-define i32 @packu_i32(i32 %a, i32 %b) nounwind {
-; RV32I-LABEL: packu_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    lui a2, 1048560
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: packu_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    packu a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %shr = lshr i32 %a, 16
-  %shr1 = and i32 %b, -65536
-  %or = or i32 %shr1, %shr
-  ret i32 %or
-}
-
-define i32 @zexth_i32(i32 %a) nounwind {
-; RV32I-LABEL: zexth_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: zexth_i32:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zext.h a0, a0
-; RV32ZBP-NEXT:    ret
-  %and = and i32 %a, 65535
-  ret i32 %and
-}
-
-define i64 @zexth_i64(i64 %a) nounwind {
-; RV32I-LABEL: zexth_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: zexth_i64:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    zext.h a0, a0
-; RV32ZBP-NEXT:    li a1, 0
-; RV32ZBP-NEXT:    ret
-  %and = and i64 %a, 65535
-  ret i64 %and
-}
-
-define i32 @or_shl_fshl(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_shl_fshl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sll a3, a1, a2
-; CHECK-NEXT:    sll a0, a0, a2
-; CHECK-NEXT:    not a2, a2
-; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    srl a1, a1, a2
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    or a0, a0, a3
-; CHECK-NEXT:    ret
-  %shy = shl i32 %y, %s
-  %fun = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %s)
-  %or = or i32 %fun, %shy
-  ret i32 %or
-}
-
-define i32 @or_shl_rot(i32 %x, i32 %y, i32 %s) {
-; RV32I-LABEL: or_shl_rot:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    sll a0, a0, a2
-; RV32I-NEXT:    sll a3, a1, a2
-; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    srl a1, a1, a2
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: or_shl_rot:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    sll a0, a0, a2
-; RV32ZBP-NEXT:    rol a1, a1, a2
-; RV32ZBP-NEXT:    or a0, a1, a0
-; RV32ZBP-NEXT:    ret
-  %shx = shl i32 %x, %s
-  %rot = call i32 @llvm.fshl.i32(i32 %y, i32 %y, i32 %s)
-  %or = or i32 %rot, %shx
-  ret i32 %or
-}
-
-define i32 @or_shl_fshl_commute(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_shl_fshl_commute:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sll a3, a1, a2
-; CHECK-NEXT:    sll a0, a0, a2
-; CHECK-NEXT:    not a2, a2
-; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    srl a1, a1, a2
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    or a0, a3, a0
-; CHECK-NEXT:    ret
-  %shy = shl i32 %y, %s
-  %fun = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %s)
-  %or = or i32 %shy, %fun
-  ret i32 %or
-}
-
-define i32 @or_shl_rot_commute(i32 %x, i32 %y, i32 %s) {
-; RV32I-LABEL: or_shl_rot_commute:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    sll a0, a0, a2
-; RV32I-NEXT:    sll a3, a1, a2
-; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    srl a1, a1, a2
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: or_shl_rot_commute:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    sll a0, a0, a2
-; RV32ZBP-NEXT:    rol a1, a1, a2
-; RV32ZBP-NEXT:    or a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %shx = shl i32 %x, %s
-  %rot = call i32 @llvm.fshl.i32(i32 %y, i32 %y, i32 %s)
-  %or = or i32 %shx, %rot
-  ret i32 %or
-}
-
-define i32 @or_lshr_fshr(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_lshr_fshr:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    srl a3, a1, a2
-; CHECK-NEXT:    srl a0, a0, a2
-; CHECK-NEXT:    not a2, a2
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    sll a1, a1, a2
-; CHECK-NEXT:    or a0, a1, a0
-; CHECK-NEXT:    or a0, a0, a3
-; CHECK-NEXT:    ret
-  %shy = lshr i32 %y, %s
-  %fun = call i32 @llvm.fshr.i32(i32 %y, i32 %x, i32 %s)
-  %or = or i32 %fun, %shy
-  ret i32 %or
-}
-
-define i32 @or_lshr_rotr(i32 %x, i32 %y, i32 %s) {
-; RV32I-LABEL: or_lshr_rotr:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    srl a3, a1, a2
-; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    sll a1, a1, a2
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: or_lshr_rotr:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srl a0, a0, a2
-; RV32ZBP-NEXT:    ror a1, a1, a2
-; RV32ZBP-NEXT:    or a0, a1, a0
-; RV32ZBP-NEXT:    ret
-  %shx = lshr i32 %x, %s
-  %rot = call i32 @llvm.fshr.i32(i32 %y, i32 %y, i32 %s)
-  %or = or i32 %rot, %shx
-  ret i32 %or
-}
-
-define i32 @or_lshr_fshr_commute(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_lshr_fshr_commute:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    srl a3, a1, a2
-; CHECK-NEXT:    srl a0, a0, a2
-; CHECK-NEXT:    not a2, a2
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    sll a1, a1, a2
-; CHECK-NEXT:    or a0, a1, a0
-; CHECK-NEXT:    or a0, a3, a0
-; CHECK-NEXT:    ret
-  %shy = lshr i32 %y, %s
-  %fun = call i32 @llvm.fshr.i32(i32 %y, i32 %x, i32 %s)
-  %or = or i32 %shy, %fun
-  ret i32 %or
-}
-
-define i32 @or_lshr_rotr_commute(i32 %x, i32 %y, i32 %s) {
-; RV32I-LABEL: or_lshr_rotr_commute:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    srl a3, a1, a2
-; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    sll a1, a1, a2
-; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV32ZBP-LABEL: or_lshr_rotr_commute:
-; RV32ZBP:       # %bb.0:
-; RV32ZBP-NEXT:    srl a0, a0, a2
-; RV32ZBP-NEXT:    ror a1, a1, a2
-; RV32ZBP-NEXT:    or a0, a0, a1
-; RV32ZBP-NEXT:    ret
-  %shx = lshr i32 %x, %s
-  %rot = call i32 @llvm.fshr.i32(i32 %y, i32 %y, i32 %s)
-  %or = or i32 %shx, %rot
-  ret i32 %or
-}
-
-define i32 @or_shl_fshl_simplify(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_shl_fshl_simplify:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sll a1, a1, a2
-; CHECK-NEXT:    not a2, a2
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    srl a0, a0, a2
-; CHECK-NEXT:    or a0, a1, a0
-; CHECK-NEXT:    ret
-  %shy = shl i32 %y, %s
-  %fun = call i32 @llvm.fshl.i32(i32 %y, i32 %x, i32 %s)
-  %or = or i32 %fun, %shy
-  ret i32 %or
-}
-
-define i32 @or_lshr_fshr_simplify(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_lshr_fshr_simplify:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    srl a1, a1, a2
-; CHECK-NEXT:    not a2, a2
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    sll a0, a0, a2
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    ret
-  %shy = lshr i32 %y, %s
-  %fun = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %s)
-  %or = or i32 %shy, %fun
-  ret i32 %or
-}

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll
index 268a7f41aaae..e8d3ec1b128e 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll
@@ -1,8 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZBB
-; RUN: llc -mtriple=riscv64 -mattr=+zbb,+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBP
 
 declare i32 @llvm.riscv.orc.b.i32(i32)
 
@@ -12,11 +10,6 @@ define signext i32 @orcb32(i32 signext %a) nounwind {
 ; RV64ZBB-NEXT:    orc.b a0, a0
 ; RV64ZBB-NEXT:    sext.w a0, a0
 ; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orcb32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 7
-; RV64ZBP-NEXT:    ret
   %tmp = call i32 @llvm.riscv.orc.b.i32(i32 %a)
   ret i32 %tmp
 }
@@ -26,11 +19,6 @@ define zeroext i32 @orcb32_zext(i32 zeroext %a) nounwind {
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    orc.b a0, a0
 ; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orcb32_zext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i32 @llvm.riscv.orc.b.i32(i32 %a)
   ret i32 %tmp
 }
@@ -47,16 +35,6 @@ define signext i32 @orcb32_knownbits(i32 signext %a) nounwind {
 ; RV64ZBB-NEXT:    orc.b a0, a0
 ; RV64ZBB-NEXT:    sext.w a0, a0
 ; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orcb32_knownbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    lui a1, 1044480
-; RV64ZBP-NEXT:    and a0, a0, a1
-; RV64ZBP-NEXT:    lui a1, 2048
-; RV64ZBP-NEXT:    addiw a1, a1, 1
-; RV64ZBP-NEXT:    or a0, a0, a1
-; RV64ZBP-NEXT:    gorciw a0, a0, 7
-; RV64ZBP-NEXT:    ret
   %tmp = and i32 %a, 4278190080 ; 0xFF000000
   %tmp2 = or i32 %tmp, 8388609 ; 0x800001
   %tmp3 = call i32 @llvm.riscv.orc.b.i32(i32 %tmp2)
@@ -72,11 +50,6 @@ define i64 @orcb64(i64 %a) nounwind {
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    orc.b a0, a0
 ; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orcb64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = call i64 @llvm.riscv.orc.b.i64(i64 %a)
   ret i64 %tmp
 }
@@ -96,20 +69,6 @@ define i64 @orcb64_knownbits(i64 %a) nounwind {
 ; RV64ZBB-NEXT:    or a0, a0, a1
 ; RV64ZBB-NEXT:    orc.b a0, a0
 ; RV64ZBB-NEXT:    ret
-;
-; RV64ZBP-LABEL: orcb64_knownbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    lui a1, 65535
-; RV64ZBP-NEXT:    slli a1, a1, 12
-; RV64ZBP-NEXT:    and a0, a0, a1
-; RV64ZBP-NEXT:    lui a1, 131073
-; RV64ZBP-NEXT:    slli a1, a1, 13
-; RV64ZBP-NEXT:    addi a1, a1, 1
-; RV64ZBP-NEXT:    slli a1, a1, 20
-; RV64ZBP-NEXT:    addi a1, a1, 8
-; RV64ZBP-NEXT:    or a0, a0, a1
-; RV64ZBP-NEXT:    orc.b a0, a0
-; RV64ZBP-NEXT:    ret
   %tmp = and i64 %a, 1099494850560 ; 0x000000ffff000000
   %tmp2 = or i64 %tmp, 4611721202800525320 ; 0x4000200000100008
   %tmp3 = call i64 @llvm.riscv.orc.b.i64(i64 %tmp2)

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
similarity index 70%
rename from llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
rename to llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 1bdcdcc862ec..2f8213cc80ac 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -2,11 +2,9 @@
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64I
 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBP-ZBKB
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBP-ZBKB
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB
 ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBP-ZBKB
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB
 
 define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: andn_i32:
@@ -15,10 +13,10 @@ define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: andn_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: andn_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %and = and i32 %neg, %a
   ret i32 %and
@@ -31,10 +29,10 @@ define i64 @andn_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: andn_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: andn_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i64 %b, -1
   %and = and i64 %neg, %a
   ret i64 %and
@@ -47,10 +45,10 @@ define signext i32 @orn_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: orn_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: orn_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %or = or i32 %neg, %a
   ret i32 %or
@@ -63,10 +61,10 @@ define i64 @orn_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: orn_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: orn_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i64 %b, -1
   %or = or i64 %neg, %a
   ret i64 %or
@@ -79,10 +77,10 @@ define signext i32 @xnor_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: xnor_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: xnor_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %a, -1
   %xor = xor i32 %neg, %b
   ret i32 %xor
@@ -95,10 +93,10 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: xnor_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    xnor a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: xnor_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i64 %a, -1
   %xor = xor i64 %neg, %b
   ret i64 %xor
@@ -115,10 +113,10 @@ define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rol_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rol_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rolw a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
   ret i32 %1
 }
@@ -134,11 +132,11 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a2)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rol_i32_nosext:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a2)
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rol_i32_nosext:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rolw a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a2)
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
   store i32 %1, i32* %x
   ret void
@@ -154,11 +152,11 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rol_i32_neg_constant_rhs:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    li a1, -2
-; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rol_i32_neg_constant_rhs:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    li a1, -2
+; RV64ZBB-ZBKB-NEXT:    rolw a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
   ret i32 %1
 }
@@ -174,10 +172,10 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rol_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rol a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rol_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rol a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
   ret i64 %or
 }
@@ -193,10 +191,10 @@ define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: ror_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rorw a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: ror_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rorw a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
   ret i32 %1
 }
@@ -212,11 +210,11 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a2)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: ror_i32_nosext:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rorw a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a2)
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: ror_i32_nosext:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rorw a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a2)
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
   store i32 %1, i32* %x
   ret void
@@ -232,11 +230,11 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: ror_i32_neg_constant_rhs:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    li a1, -2
-; RV64ZBB-ZBP-ZBKB-NEXT:    rorw a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: ror_i32_neg_constant_rhs:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    li a1, -2
+; RV64ZBB-ZBKB-NEXT:    rorw a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)
   ret i32 %1
 }
@@ -252,10 +250,10 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: ror_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    ror a0, a0, a1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: ror_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    ror a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
   ret i64 %or
 }
@@ -268,10 +266,10 @@ define signext i32 @rori_i32_fshl(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshl:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rori_i32_fshl:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 1
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
   ret i32 %1
 }
@@ -286,11 +284,11 @@ define void @rori_i32_fshl_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a1)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshl_nosext:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 1
-; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a1)
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rori_i32_fshl_nosext:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 1
+; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a1)
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
   store i32 %1, i32* %x
   ret void
@@ -304,10 +302,10 @@ define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshr:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 31
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rori_i32_fshr:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 31
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
   ret i32 %1
 }
@@ -322,11 +320,11 @@ define void @rori_i32_fshr_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-NEXT:    sw a0, 0(a1)
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rori_i32_fshr_nosext:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    roriw a0, a0, 31
-; RV64ZBB-ZBP-ZBKB-NEXT:    sw a0, 0(a1)
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rori_i32_fshr_nosext:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    roriw a0, a0, 31
+; RV64ZBB-ZBKB-NEXT:    sw a0, 0(a1)
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
   store i32 %1, i32* %x
   ret void
@@ -379,10 +377,10 @@ define i64 @rori_i64_fshl(i64 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rori_i64_fshl:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 1
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rori_i64_fshl:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rori a0, a0, 1
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 63)
   ret i64 %1
 }
@@ -395,10 +393,10 @@ define i64 @rori_i64_fshr(i64 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: rori_i64_fshr:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    rori a0, a0, 63
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: rori_i64_fshr:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    rori a0, a0, 63
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 63)
   ret i64 %1
 }
@@ -411,11 +409,11 @@ define signext i32 @not_shl_one_i32(i32 signext %x) {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: not_shl_one_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    li a1, -2
-; RV64ZBB-ZBP-ZBKB-NEXT:    rolw a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: not_shl_one_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    li a1, -2
+; RV64ZBB-ZBKB-NEXT:    rolw a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = shl i32 1, %x
   %2 = xor i32 %1, -1
   ret i32 %2
@@ -429,11 +427,11 @@ define i64 @not_shl_one_i64(i64 %x) {
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: not_shl_one_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    li a1, -2
-; RV64ZBB-ZBP-ZBKB-NEXT:    rol a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: not_shl_one_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    li a1, -2
+; RV64ZBB-ZBKB-NEXT:    rol a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %1 = shl i64 1, %x
   %2 = xor i64 %1, -1
   ret i64 %2
@@ -493,11 +491,11 @@ define i1 @andn_seqz_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: andn_seqz_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: andn_seqz_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    seqz a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %and = and i32 %a, %b
   %cmpeq = icmp eq i32 %and, %b
   ret i1 %cmpeq
@@ -511,11 +509,11 @@ define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: andn_seqz_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    seqz a0, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: andn_seqz_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    seqz a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %and = and i64 %a, %b
   %cmpeq = icmp eq i64 %and, %b
   ret i1 %cmpeq
@@ -529,11 +527,11 @@ define i1 @andn_snez_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: andn_snez_i32:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: andn_snez_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    snez a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %and = and i32 %a, %b
   %cmpeq = icmp ne i32 %and, %b
   ret i1 %cmpeq
@@ -547,11 +545,11 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBB-ZBP-ZBKB-LABEL: andn_snez_i64:
-; RV64ZBB-ZBP-ZBKB:       # %bb.0:
-; RV64ZBB-ZBP-ZBKB-NEXT:    andn a0, a1, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    snez a0, a0
-; RV64ZBB-ZBP-ZBKB-NEXT:    ret
+; RV64ZBB-ZBKB-LABEL: andn_snez_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    snez a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
   %and = and i64 %a, %b
   %cmpeq = icmp ne i64 %and, %b
   ret i1 %cmpeq

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
similarity index 71%
rename from llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll
rename to llvm/test/CodeGen/RISCV/rv64zbkb.ll
index a2c27e028fca..ac28336dd0b6 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbp-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -1,10 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBP-ZBKB
 ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBP-ZBKB
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKB
 
 define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: pack_i32:
@@ -15,10 +13,10 @@ define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBP-ZBKB-LABEL: pack_i32:
-; RV64ZBP-ZBKB:       # %bb.0:
-; RV64ZBP-ZBKB-NEXT:    packw a0, a0, a1
-; RV64ZBP-ZBKB-NEXT:    ret
+; RV64ZBKB-LABEL: pack_i32:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    packw a0, a0, a1
+; RV64ZBKB-NEXT:    ret
   %shl = and i32 %a, 65535
   %shl1 = shl i32 %b, 16
   %or = or i32 %shl1, %shl
@@ -34,10 +32,10 @@ define i64 @pack_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBP-ZBKB-LABEL: pack_i64:
-; RV64ZBP-ZBKB:       # %bb.0:
-; RV64ZBP-ZBKB-NEXT:    pack a0, a0, a1
-; RV64ZBP-ZBKB-NEXT:    ret
+; RV64ZBKB-LABEL: pack_i64:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    pack a0, a0, a1
+; RV64ZBKB-NEXT:    ret
   %shl = and i64 %a, 4294967295
   %shl1 = shl i64 %b, 32
   %or = or i64 %shl1, %shl
@@ -53,10 +51,10 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBP-ZBKB-LABEL: packh_i32:
-; RV64ZBP-ZBKB:       # %bb.0:
-; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
-; RV64ZBP-ZBKB-NEXT:    ret
+; RV64ZBKB-LABEL: packh_i32:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBKB-NEXT:    ret
   %and = and i32 %a, 255
   %and1 = shl i32 %b, 8
   %shl = and i32 %and1, 65280
@@ -73,10 +71,10 @@ define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBP-ZBKB-LABEL: packh_i32_2:
-; RV64ZBP-ZBKB:       # %bb.0:
-; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
-; RV64ZBP-ZBKB-NEXT:    ret
+; RV64ZBKB-LABEL: packh_i32_2:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBKB-NEXT:    ret
   %and = and i32 %a, 255
   %and1 = and i32 %b, 255
   %shl = shl i32 %and1, 8
@@ -93,10 +91,10 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBP-ZBKB-LABEL: packh_i64:
-; RV64ZBP-ZBKB:       # %bb.0:
-; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
-; RV64ZBP-ZBKB-NEXT:    ret
+; RV64ZBKB-LABEL: packh_i64:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBKB-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = shl i64 %b, 8
   %shl = and i64 %and1, 65280
@@ -113,10 +111,10 @@ define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
-; RV64ZBP-ZBKB-LABEL: packh_i64_2:
-; RV64ZBP-ZBKB:       # %bb.0:
-; RV64ZBP-ZBKB-NEXT:    packh a0, a0, a1
-; RV64ZBP-ZBKB-NEXT:    ret
+; RV64ZBKB-LABEL: packh_i64_2:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    packh a0, a0, a1
+; RV64ZBKB-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = and i64 %b, 255
   %shl = shl i64 %and1, 8

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
deleted file mode 100644
index fe5acf8dfef5..000000000000
--- a/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
+++ /dev/null
@@ -1,986 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBP
-
-declare i32 @llvm.riscv.grev.i32(i32 %a, i32 %b)
-
-define signext i32 @grev32(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBP-LABEL: grev32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    grevw a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define signext i32 @grev32_demandedbits(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
-; RV64ZBP-LABEL: grev32_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    add a0, a0, a1
-; RV64ZBP-NEXT:    grevw a0, a0, a2
-; RV64ZBP-NEXT:    ret
-  %d = add i32 %a, %b
-  %e = and i32 %c, 31
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %d, i32 %e)
-  ret i32 %tmp
-}
-
-define signext i32 @grevi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: grevi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define zeroext i32 @grevi32_zext(i32 zeroext %a) nounwind {
-; RV64ZBP-LABEL: grevi32_zext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    grevi a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.gorc.i32(i32 %a, i32 %b)
-
-define signext i32 @gorc32(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBP-LABEL: gorc32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorcw a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define signext i32 @gorc32_demandedbits(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
-; RV64ZBP-LABEL: gorc32_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    add a0, a0, a1
-; RV64ZBP-NEXT:    gorcw a0, a0, a2
-; RV64ZBP-NEXT:    ret
-  %d = add i32 %a, %b
-  %e = and i32 %c, 31
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %d, i32 %e)
-  ret i32 %tmp
-}
-
-define signext i32 @gorci32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: gorci32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define zeroext i32 @gorci32_zext(i32 zeroext %a) nounwind {
-; RV64ZBP-LABEL: gorci32_zext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 31)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b)
-
-define signext i32 @shfl32(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBP-LABEL: shfl32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    shflw a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define signext i32 @shfl32_demandedbits(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
-; RV64ZBP-LABEL: shfl32_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    add a0, a0, a1
-; RV64ZBP-NEXT:    shflw a0, a0, a2
-; RV64ZBP-NEXT:    ret
-  %d = add i32 %a, %b
-  %e = and i32 %c, 15
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %d, i32 %e)
-  ret i32 %tmp
-}
-
-define signext i32 @zipni32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zipni32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 1)
-  ret i32 %tmp
-}
-
-define signext i32 @zip2bi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zip2bi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 2)
-  ret i32 %tmp
-}
-
-define signext i32 @zipbi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zipbi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 3)
-  ret i32 %tmp
-}
-
-define signext i32 @zip4hi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zip4hi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 4)
-  ret i32 %tmp
-}
-
-define signext i32 @zip2hi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zip2hi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 6)
-  ret i32 %tmp
-}
-
-define signext i32 @ziphi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: ziphi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 7)
-  ret i32 %tmp
-}
-
-define signext i32 @shfli32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: shfli32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    shfli a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define signext i32 @zip4wi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zip4wi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 12)
-  ret i32 %tmp
-}
-
-define signext i32 @zip2wi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zip2wi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 14)
-  ret i32 %tmp
-}
-
-define signext i32 @zipwi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zipwi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 15)
-  ret i32 %tmp
-}
-
-define signext i32 @zip8wi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: zip8wi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 8)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b)
-
-define signext i32 @unshfl32(i32 signext %a, i32 signext %b) nounwind {
-; RV64ZBP-LABEL: unshfl32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unshflw a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-define signext i32 @unshfl32_demandedbits(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
-; RV64ZBP-LABEL: unshfl32_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    add a0, a0, a1
-; RV64ZBP-NEXT:    unshflw a0, a0, a2
-; RV64ZBP-NEXT:    ret
-  %d = add i32 %a, %b
-  %e = and i32 %c, 15
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %d, i32 %e)
-  ret i32 %tmp
-}
-
-define signext i32 @unzipni32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzipni32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 1)
-  ret i32 %tmp
-}
-
-define signext i32 @unzip2bi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzip2bi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 2)
-  ret i32 %tmp
-}
-
-define signext i32 @unzipbi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzipbi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 3)
-  ret i32 %tmp
-}
-
-define signext i32 @unzip4hi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzip4hi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 4)
-  ret i32 %tmp
-}
-
-define signext i32 @unzip2hi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzip2hi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 6)
-  ret i32 %tmp
-}
-
-define signext i32 @unziphi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unziphi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 7)
-  ret i32 %tmp
-}
-
-define signext i32 @unshfli32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unshfli32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unshfli a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 13)
-  ret i32 %tmp
-}
-
-define signext i32 @unzip4wi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzip4wi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip4.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 12)
-  ret i32 %tmp
-}
-
-define signext i32 @unzip2wi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzip2wi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 14)
-  ret i32 %tmp
-}
-
-define signext i32 @unzipwi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzipwi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 15)
-  ret i32 %tmp
-}
-
-define signext i32 @unzip8wi32(i32 signext %a) nounwind {
-; RV64ZBP-LABEL: unzip8wi32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 8)
-  ret i32 %tmp
-}
-
-declare i64 @llvm.riscv.grev.i64(i64 %a, i64 %b)
-
-define i64 @grev64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: grev64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    grev a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-define i64 @grev64_demandedbits(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: grev64_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    grev a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %c = and i64 %b, 63
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 %c)
-  ret i64 %tmp
-}
-
-define i64 @grevi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: grevi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    grevi a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 13)
-  ret i64 %tmp
-}
-
-; Make sure we don't fold this rotate with the grev. We can only fold a rotate
-; by 32.
-define i64 @grevi64_24_rotl_16(i64 %a) nounwind {
-; RV64ZBP-LABEL: grevi64_24_rotl_16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.w a0, a0
-; RV64ZBP-NEXT:    rori a0, a0, 48
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 24)
-  %tmp1 = call i64 @llvm.fshl.i64(i64 %tmp, i64 %tmp, i64 16)
-  ret i64 %tmp1
-}
-declare i64 @llvm.fshl.i64(i64, i64, i64)
-
-; Make sure we don't fold this rotate with the grev. We can only fold a rotate
-; by 32.
-define i64 @grevi64_24_rotr_16(i64 %a) nounwind {
-; RV64ZBP-LABEL: grevi64_24_rotr_16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.w a0, a0
-; RV64ZBP-NEXT:    rori a0, a0, 16
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 24)
-  %tmp1 = call i64 @llvm.fshr.i64(i64 %tmp, i64 %tmp, i64 16)
-  ret i64 %tmp1
-}
-declare i64 @llvm.fshr.i64(i64, i64, i64)
-
-define i64 @revhwi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: revhwi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 15)
-  ret i64 %tmp
-}
-
-define i64 @rev16wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev16wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev16.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 16)
-  ret i64 %tmp
-}
-
-define i64 @rev8wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev8wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 24)
-  ret i64 %tmp
-}
-
-define i64 @rev4wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev4wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev4.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 28)
-  ret i64 %tmp
-}
-
-define i64 @rev2wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev2wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev2.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 30)
-  ret i64 %tmp
-}
-
-define i64 @revwi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: revwi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 31)
-  ret i64 %tmp
-}
-
-define i64 @rev32i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev32i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev32 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 32)
-  ret i64 %tmp
-}
-
-define i64 @rev16i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev16i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev16 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 48)
-  ret i64 %tmp
-}
-
-define i64 @rev8i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev8i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 56)
-  ret i64 %tmp
-}
-
-define i64 @rev4i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev4i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev4 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 60)
-  ret i64 %tmp
-}
-
-define i64 @rev2i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: rev2i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev2 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 62)
-  ret i64 %tmp
-}
-
-define i64 @revi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: revi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.grev.i64(i64 %a, i64 63)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.gorc.i64(i64 %a, i64 %b)
-
-define i64 @gorc64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: gorc64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorc a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-define i64 @gorc64_demandedbits(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: gorc64_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorc a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %c = and i64 %b, 63
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 %c)
-  ret i64 %tmp
-}
-
-define i64 @gorci64(i64 %a) nounwind {
-; RV64ZBP-LABEL: gorci64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorci a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 13)
-  ret i64 %tmp
-}
-
-; The second OR is redundant with the first. Make sure we remove it.
-define i64 @gorci64_knownbits(i64 %a) nounwind {
-; RV64ZBP-LABEL: gorci64_knownbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    lui a1, %hi(.LCPI54_0)
-; RV64ZBP-NEXT:    ld a1, %lo(.LCPI54_0)(a1)
-; RV64ZBP-NEXT:    or a0, a0, a1
-; RV64ZBP-NEXT:    orc32 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = or i64 %a, 72624976668147840 ; 0x102040810204080
-  %tmp2 = call i64 @llvm.riscv.gorc.i64(i64 %tmp, i64 32)
-  %tmp3 = or i64 %tmp2, 1234624599046636680 ; 0x1122448811224488
-  ret i64 %tmp3
-}
-
-define i64 @orchi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orchi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 15)
-  ret i64 %tmp
-}
-
-define i64 @orc16wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc16wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc16.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 16)
-  ret i64 %tmp
-}
-
-define i64 @orc8wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc8wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 24)
-  ret i64 %tmp
-}
-
-define i64 @orc4wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc4wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc4.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 28)
-  ret i64 %tmp
-}
-
-define i64 @orc2wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc2wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc2.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 30)
-  ret i64 %tmp
-}
-
-define i64 @orcwi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orcwi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 31)
-  ret i64 %tmp
-}
-
-define i64 @orc32i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc32i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc32 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 32)
-  ret i64 %tmp
-}
-
-define i64 @orc16i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc16i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc16 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 48)
-  ret i64 %tmp
-}
-
-define i64 @orc8i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc8i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc8 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 56)
-  ret i64 %tmp
-}
-
-define i64 @orc4i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc4i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc4 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 60)
-  ret i64 %tmp
-}
-
-define i64 @orc2i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orc2i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc2 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 62)
-  ret i64 %tmp
-}
-
-define i64 @orci64(i64 %a) nounwind {
-; RV64ZBP-LABEL: orci64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 63)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.shfl.i64(i64 %a, i64 %b)
-
-define i64 @shfl64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: shfl64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    shfl a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-define i64 @shfl64_demandedbits(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: shfl64_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    shfl a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %c = and i64 %b, 31
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 %c)
-  ret i64 %tmp
-}
-
-define i64 @shfli64(i64 %a) nounwind {
-; RV64ZBP-LABEL: shfli64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    shfli a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 13)
-  ret i64 %tmp
-}
-
-define i64 @zip4wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip4wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 12)
-  ret i64 %tmp
-}
-
-define i64 @zip2wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip2wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 14)
-  ret i64 %tmp
-}
-
-define i64 @zipwi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zipwi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 15)
-  ret i64 %tmp
-}
-
-define i64 @zip8i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip8i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip8 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 24)
-  ret i64 %tmp
-}
-
-define i64 @zip4i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip4i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 28)
-  ret i64 %tmp
-}
-
-define i64 @zip2i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip2i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 30)
-  ret i64 %tmp
-}
-
-define i64 @zipi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zipi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 31)
-  ret i64 %tmp
-}
-
-define i64 @zipni64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zipni64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 1)
-  ret i64 %tmp
-}
-
-define i64 @zip2bi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip2bi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 2)
-  ret i64 %tmp
-}
-
-define i64 @zipbi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zipbi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 3)
-  ret i64 %tmp
-}
-
-define i64 @zip4hi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip4hi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 4)
-  ret i64 %tmp
-}
-
-define i64 @zip2hi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: zip2hi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 6)
-  ret i64 %tmp
-}
-
-define i64 @ziphi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: ziphi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.shfl.i64(i64 %a, i64 7)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.unshfl.i64(i64 %a, i64 %b)
-
-define i64 @unshfl64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: unshfl64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unshfl a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-define i64 @unshfl64_demandedbits(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: unshfl64_demandedbits:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unshfl a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %c = and i64 %b, 31
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 %c)
-  ret i64 %tmp
-}
-
-define i64 @unshfli64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unshfli64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unshfli a0, a0, 13
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 13)
-  ret i64 %tmp
-}
-
-define i64 @unzip4wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip4wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip4.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 12)
-  ret i64 %tmp
-}
-
-define i64 @unzip2wi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip2wi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 14)
-  ret i64 %tmp
-}
-
-define i64 @unzipwi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzipwi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 15)
-  ret i64 %tmp
-}
-
-define i64 @unzip8i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip8i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip8 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 24)
-  ret i64 %tmp
-}
-
-define i64 @unzip4i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip4i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip4 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 28)
-  ret i64 %tmp
-}
-
-define i64 @unzip2i64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip2i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2 a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 30)
-  ret i64 %tmp
-}
-
-define i64 @unzipi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzipi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 31)
-  ret i64 %tmp
-}
-
-define i64 @unzipni64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzipni64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 1)
-  ret i64 %tmp
-}
-
-define i64 @unzip2bi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip2bi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 2)
-  ret i64 %tmp
-}
-
-define i64 @unzipbi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzipbi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 3)
-  ret i64 %tmp
-}
-
-define i64 @unzip4hi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip4hi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 4)
-  ret i64 %tmp
-}
-
-define i64 @unzip2hi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unzip2hi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip2.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 6)
-  ret i64 %tmp
-}
-
-define i64 @unziphi64(i64 %a) nounwind {
-; RV64ZBP-LABEL: unziphi64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    unzip.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.unshfl.i64(i64 %a, i64 7)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.xperm.n.i64(i64 %a, i64 %b)
-
-define i64 @xpermn64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: xpermn64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    xperm.n a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.xperm.n.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.xperm.b.i64(i64 %a, i64 %b)
-
-define i64 @xpermb64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: xpermb64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    xperm.b a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.xperm.b.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.xperm.h.i64(i64 %a, i64 %b)
-
-define i64 @xpermh64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: xpermh64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    xperm.h a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.xperm.h.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.xperm.w.i64(i64 %a, i64 %b)
-
-define i64 @xpermw64(i64 %a, i64 %b) nounwind {
-; RV64ZBP-LABEL: xpermw64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    xperm.w a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.xperm.w.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll
deleted file mode 100644
index 5f1ac90398de..000000000000
--- a/llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ /dev/null
@@ -1,3339 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBP
-
-define signext i32 @gorc1_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc1_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc1_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 1
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 1
-  %shl = and i32 %and, -1431655766
-  %and1 = lshr i32 %a, 1
-  %shr = and i32 %and1, 1431655765
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc1_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc1_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI1_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI1_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI1_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI1_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc1_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.p a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 1
-  %shl = and i64 %and, -6148914691236517206
-  %and1 = lshr i64 %a, 1
-  %shr = and i64 %and1, 6148914691236517205
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define signext i32 @gorc2_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc2_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc2_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 2
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 2
-  %shl = and i32 %and, -858993460
-  %and1 = lshr i32 %a, 2
-  %shr = and i32 %and1, 858993459
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc2_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc2_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI3_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI3_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc2_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc2.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 2
-  %shl = and i64 %and, -3689348814741910324
-  %and1 = lshr i64 %a, 2
-  %shr = and i64 %and1, 3689348814741910323
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define signext i32 @gorc3_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc3_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc3_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 3
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc3_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc3_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI5_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI5_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI5_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI5_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI5_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI5_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI5_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI5_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc3_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define signext i32 @gorc4_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc4_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc4_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 4
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 4
-  %shl = and i32 %and, -252645136
-  %and1 = lshr i32 %a, 4
-  %shr = and i32 %and1, 252645135
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc4_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc4_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI7_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 4
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc4_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc4.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 4
-  %shl = and i64 %and, -1085102592571150096
-  %and1 = lshr i64 %a, 4
-  %shr = and i64 %and1, 1085102592571150095
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define signext i32 @gorc5_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc5_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc5_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 5
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1b, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc5_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc5_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI9_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI9_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI9_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI9_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI9_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI9_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 4
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc5_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorci a0, a0, 5
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1b, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define signext i32 @gorc6_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc6_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc6_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 6
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1b, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc6_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc6_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI11_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI11_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI11_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI11_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI11_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI11_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI11_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI11_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 4
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc6_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1b, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define signext i32 @gorc7_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc7_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slli a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc7_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 7
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  %and3 = shl i32 %or2b, 4
-  %shl3 = and i32 %and3, -252645136
-  %and3b = lshr i32 %or2b, 4
-  %shr3 = and i32 %and3b, 252645135
-  %or3 = or i32 %shr3, %or2b
-  %or3b = or i32 %or3, %shl3
-  ret i32 %or3b
-}
-
-define i64 @gorc7_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc7_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI13_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI13_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI13_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI13_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI13_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI13_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI13_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI13_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI13_4)
-; RV64I-NEXT:    ld a1, %lo(.LCPI13_4)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI13_5)
-; RV64I-NEXT:    ld a2, %lo(.LCPI13_5)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 4
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc7_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  %and3 = shl i64 %or2b, 4
-  %shl3 = and i64 %and3, -1085102592571150096
-  %and3b = lshr i64 %or2b, 4
-  %shr3 = and i64 %and3b, 1085102592571150095
-  %or3 = or i64 %shr3, %or2b
-  %or3b = or i64 %or3, %shl3
-  ret i64 %or3b
-}
-
-define signext i32 @gorc8_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc8_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 1044496
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    addiw a3, a3, 255
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc8_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 8
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 8
-  %shl = and i32 %and, -16711936
-  %and1 = lshr i32 %a, 8
-  %shr = and i32 %and1, 16711935
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i64 @gorc8_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc8_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI15_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI15_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI15_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI15_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 8
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc8_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc8.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 8
-  %shl = and i64 %and, -71777214294589696
-  %and1 = lshr i64 %a, 8
-  %shr = and i64 %and1, 71777214294589695
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define signext i32 @gorc12_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc12_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 1044496
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    addiw a3, a3, 255
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc12_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 12
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 4
-  %shl1 = and i32 %and1, -252645136
-  %and1b = lshr i32 %a, 4
-  %shr1 = and i32 %and1b, 252645135
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 8
-  %shl2 = and i32 %and2, -16711936
-  %and2b = lshr i32 %or1b, 8
-  %shr2 = and i32 %and2b, 16711935
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-define i64 @gorc12_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc12_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI17_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI17_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI17_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 4
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI17_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI17_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI17_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI17_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 8
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc12_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 4
-  %shl1 = and i64 %and1, -1085102592571150096
-  %and1b = lshr i64 %a, 4
-  %shr1 = and i64 %and1b, 1085102592571150095
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 8
-  %shl2 = and i64 %and2, -71777214294589696
-  %and2b = lshr i64 %or1b, 8
-  %shr2 = and i64 %and2b, 71777214294589695
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-define signext i32 @gorc14_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc14_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slli a1, a0, 8
-; RV64I-NEXT:    lui a2, 1044496
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    addiw a3, a3, 255
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc14_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 14
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1b, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  %and3 = shl i32 %or2b, 8
-  %shl3 = and i32 %and3, -16711936
-  %and3b = lshr i32 %or2b, 8
-  %shr3 = and i32 %and3b, 16711935
-  %or3 = or i32 %shr3, %or2b
-  %or3b = or i32 %or3, %shl3
-  ret i32 %or3b
-}
-
-define i64 @gorc14_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc14_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI19_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI19_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI19_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI19_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI19_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI19_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI19_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 4
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI19_4)
-; RV64I-NEXT:    ld a1, %lo(.LCPI19_4)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI19_5)
-; RV64I-NEXT:    ld a2, %lo(.LCPI19_5)(a2)
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 8
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc14_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc2.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1b, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  %and3 = shl i64 %or2b, 8
-  %shl3 = and i64 %and3, -71777214294589696
-  %and3b = lshr i64 %or2b, 8
-  %shr3 = and i64 %and3b, 71777214294589695
-  %or3 = or i64 %shr3, %or2b
-  %or3b = or i64 %or3, %shl3
-  ret i64 %or3b
-}
-
-define signext i32 @gorc16_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc16_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 16
-; RV64I-NEXT:    srliw a2, a0, 16
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc16_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 16
-; RV64ZBP-NEXT:    ret
-  %shl = shl i32 %a, 16
-  %shr = lshr i32 %a, 16
-  %or = or i32 %shr, %a
-  %or2 = or i32 %or, %shl
-  ret i32 %or2
-}
-
-define i32 @gorc16_rotl_i32(i32 %a) nounwind {
-; RV64I-LABEL: gorc16_rotl_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a1, a0, 16
-; RV64I-NEXT:    slliw a2, a0, 16
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc16_rotl_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc16.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %rot = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
-  %or = or i32 %rot, %a
-  ret i32 %or
-}
-
-define i32 @gorc16_rotr_i32(i32 %a) nounwind {
-; RV64I-LABEL: gorc16_rotr_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 16
-; RV64I-NEXT:    srliw a2, a0, 16
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc16_rotr_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc16.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %rot = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 16)
-  %or = or i32 %rot, %a
-  ret i32 %or
-}
-
-define i64 @gorc16_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc16_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 16
-; RV64I-NEXT:    lui a2, 983041
-; RV64I-NEXT:    slli a3, a2, 4
-; RV64I-NEXT:    addi a3, a3, -1
-; RV64I-NEXT:    slli a3, a3, 16
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a3, a0, 16
-; RV64I-NEXT:    slli a2, a2, 20
-; RV64I-NEXT:    addi a2, a2, -1
-; RV64I-NEXT:    srli a2, a2, 16
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc16_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc16.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 16
-  %shl = and i64 %and, -281470681808896
-  %and1 = lshr i64 %a, 16
-  %shr = and i64 %and1, 281470681808895
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-define i64 @gorc32(i64 %a) nounwind {
-; RV64I-LABEL: gorc32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a2, a0, 32
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc32 a0, a0
-; RV64ZBP-NEXT:    ret
-  %shl = shl i64 %a, 32
-  %shr = lshr i64 %a, 32
-  %or = or i64 %shr, %a
-  %or2 = or i64 %or, %shl
-  ret i64 %or2
-}
-
-; gorc2, gorc2 -> gorc2
-define signext i32 @gorc2b_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc2b_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    lui a4, 209715
-; RV64I-NEXT:    addiw a4, a4, 819
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    or a1, a0, a1
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    srli a2, a1, 2
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc2b_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    srliw a1, a0, 2
-; RV64ZBP-NEXT:    or a1, a1, a0
-; RV64ZBP-NEXT:    orc2.n a0, a0
-; RV64ZBP-NEXT:    slli a1, a1, 2
-; RV64ZBP-NEXT:    lui a2, 838861
-; RV64ZBP-NEXT:    addiw a2, a2, -820
-; RV64ZBP-NEXT:    and a1, a1, a2
-; RV64ZBP-NEXT:    srli a2, a0, 2
-; RV64ZBP-NEXT:    lui a3, 209715
-; RV64ZBP-NEXT:    addiw a3, a3, 819
-; RV64ZBP-NEXT:    and a2, a2, a3
-; RV64ZBP-NEXT:    or a0, a2, a0
-; RV64ZBP-NEXT:    or a0, a0, a1
-; RV64ZBP-NEXT:    sext.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  ret i32 %or2b
-}
-
-; gorc2, gorc2 -> gorc2
-define i64 @gorc2b_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc2b_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI26_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI26_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI26_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a3, a3, a1
-; RV64I-NEXT:    srli a4, a0, 2
-; RV64I-NEXT:    and a4, a4, a2
-; RV64I-NEXT:    or a0, a4, a0
-; RV64I-NEXT:    or a3, a0, a3
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a3, 2
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    or a1, a1, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc2b_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    srli a1, a0, 2
-; RV64ZBP-NEXT:    or a1, a1, a0
-; RV64ZBP-NEXT:    orc2.n a0, a0
-; RV64ZBP-NEXT:    lui a2, %hi(.LCPI26_0)
-; RV64ZBP-NEXT:    ld a2, %lo(.LCPI26_0)(a2)
-; RV64ZBP-NEXT:    lui a3, %hi(.LCPI26_1)
-; RV64ZBP-NEXT:    ld a3, %lo(.LCPI26_1)(a3)
-; RV64ZBP-NEXT:    slli a1, a1, 2
-; RV64ZBP-NEXT:    and a1, a1, a2
-; RV64ZBP-NEXT:    srli a2, a0, 2
-; RV64ZBP-NEXT:    and a2, a2, a3
-; RV64ZBP-NEXT:    or a0, a2, a0
-; RV64ZBP-NEXT:    or a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  ret i64 %or2b
-}
-
-; gorc1, gorc2, gorc1 -> gorc2
-define signext i32 @gorc3b_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: gorc3b_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    lui a4, 349525
-; RV64I-NEXT:    addiw a4, a4, 1365
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a3, 838861
-; RV64I-NEXT:    addiw a3, a3, -820
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a3, a0, 2
-; RV64I-NEXT:    lui a5, 209715
-; RV64I-NEXT:    addiw a5, a5, 819
-; RV64I-NEXT:    and a3, a3, a5
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slli a1, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc3b_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    gorciw a0, a0, 3
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shr1, %a
-  %or1b = or i32 %or1, %shl1
-  %and2 = shl i32 %or1b, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1b, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shr2, %or1b
-  %or2b = or i32 %or2, %shl2
-  %and3 = shl i32 %or2b, 1
-  %shl3 = and i32 %and3, -1431655766
-  %and3b = lshr i32 %or2b, 1
-  %shr3 = and i32 %and3b, 1431655765
-  %or3 = or i32 %shr3, %or2b
-  %or3b = or i32 %or3, %shl3
-  ret i32 %or3b
-}
-
-; gorc1, gorc2, gorc1 -> gorc2
-define i64 @gorc3b_i64(i64 %a) nounwind {
-; RV64I-LABEL: gorc3b_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI28_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI28_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI28_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI28_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a3, a3, a1
-; RV64I-NEXT:    srli a4, a0, 1
-; RV64I-NEXT:    and a4, a4, a2
-; RV64I-NEXT:    or a0, a4, a0
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    lui a3, %hi(.LCPI28_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI28_2)(a3)
-; RV64I-NEXT:    lui a4, %hi(.LCPI28_3)
-; RV64I-NEXT:    ld a4, %lo(.LCPI28_3)(a4)
-; RV64I-NEXT:    slli a5, a0, 2
-; RV64I-NEXT:    and a3, a5, a3
-; RV64I-NEXT:    srli a5, a0, 2
-; RV64I-NEXT:    and a4, a5, a4
-; RV64I-NEXT:    or a0, a4, a0
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc3b_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shr1, %a
-  %or1b = or i64 %or1, %shl1
-  %and2 = shl i64 %or1b, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1b, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shr2, %or1b
-  %or2b = or i64 %or2, %shl2
-  %and3 = shl i64 %or2b, 1
-  %shl3 = and i64 %and3, -6148914691236517206
-  %and3b = lshr i64 %or2b, 1
-  %shr3 = and i64 %and3b, 6148914691236517205
-  %or3 = or i64 %shr3, %or2b
-  %or3b = or i64 %or3, %shl3
-  ret i64 %or3b
-}
-
-define i64 @gorc32_rotl(i64 %a) nounwind {
-; RV64I-LABEL: gorc32_rotl:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    slli a2, a0, 32
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc32_rotl:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc32 a0, a0
-; RV64ZBP-NEXT:    ret
-  %rot = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 32)
-  %or = or i64 %rot, %a
-  ret i64 %or
-}
-
-define i64 @gorc32_rotr(i64 %a) nounwind {
-; RV64I-LABEL: gorc32_rotr:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a2, a0, 32
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: gorc32_rotr:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    orc32 a0, a0
-; RV64ZBP-NEXT:    ret
-  %rot = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 32)
-  %or = or i64 %rot, %a
-  ret i64 %or
-}
-
-define signext i32 @grev1_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev1_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev1_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 1
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 1
-  %shl = and i32 %and, -1431655766
-  %and1 = lshr i32 %a, 1
-  %shr = and i32 %and1, 1431655765
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev1_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev1_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI32_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI32_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI32_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI32_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev1_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.p a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 1
-  %shl = and i64 %and, -6148914691236517206
-  %and1 = lshr i64 %a, 1
-  %shr = and i64 %and1, 6148914691236517205
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define signext i32 @grev2_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev2_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev2_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 2
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 2
-  %shl = and i32 %and, -858993460
-  %and1 = lshr i32 %a, 2
-  %shr = and i32 %and1, 858993459
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev2_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev2_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI34_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI34_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI34_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev2_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev2.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 2
-  %shl = and i64 %and, -3689348814741910324
-  %and1 = lshr i64 %a, 2
-  %shr = and i64 %and1, 3689348814741910323
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define signext i32 @grev3_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev3_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev3_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 3
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev3_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev3_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI36_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI36_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI36_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI36_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI36_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI36_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI36_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI36_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev3_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define signext i32 @grev4_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev4_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev4_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 4
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 4
-  %shl = and i32 %and, -252645136
-  %and1 = lshr i32 %a, 4
-  %shr = and i32 %and1, 252645135
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev4_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev4_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI38_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI38_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI38_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev4_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev4.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 4
-  %shl = and i64 %and, -1085102592571150096
-  %and1 = lshr i64 %a, 4
-  %shr = and i64 %and1, 1085102592571150095
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define signext i32 @grev5_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev5_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev5_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 5
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev5_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev5_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI40_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI40_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI40_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI40_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI40_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI40_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI40_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI40_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev5_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    grevi a0, a0, 5
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-
-  %and2 = shl i64 %or1, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define signext i32 @grev6_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev6_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev6_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 6
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev6_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev6_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI42_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI42_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI42_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI42_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI42_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI42_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI42_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI42_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev6_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define signext i32 @grev7_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev7_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev7_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 7
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 4
-  %shl3 = and i32 %and3, -252645136
-  %and3b = lshr i32 %or2, 4
-  %shr3 = and i32 %and3b, 252645135
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-define zeroext i32 @grev7_i32_zext(i32 zeroext %a) nounwind {
-; RV64I-LABEL: grev7_i32_zext:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slli a1, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    slli a3, a2, 4
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev7_i32_zext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 4
-  %shl3 = and i32 %and3, -252645136
-  %and3b = lshr i32 %or2, 4
-  %shr3 = and i32 %and3b, 252645135
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-define i64 @grev7_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev7_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI45_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI45_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI45_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI45_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI45_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI45_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI45_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI45_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI45_4)
-; RV64I-NEXT:    ld a1, %lo(.LCPI45_4)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI45_5)
-; RV64I-NEXT:    ld a2, %lo(.LCPI45_5)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev7_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 4
-  %shl3 = and i64 %and3, -1085102592571150096
-  %and3b = lshr i64 %or2, 4
-  %shr3 = and i64 %and3b, 1085102592571150095
-  %or3 = or i64 %shl3, %shr3
-  ret i64 %or3
-}
-
-define signext i32 @grev8_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev8_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 1044496
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    lui a2, 4080
-; RV64I-NEXT:    addiw a2, a2, 255
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev8_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 8
-; RV64ZBP-NEXT:    ret
-  %and = shl i32 %a, 8
-  %shl = and i32 %and, -16711936
-  %and1 = lshr i32 %a, 8
-  %shr = and i32 %and1, 16711935
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-define i64 @grev8_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev8_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI47_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI47_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI47_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI47_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev8_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 8
-  %shl = and i64 %and, -71777214294589696
-  %and1 = lshr i64 %a, 8
-  %shr = and i64 %and1, 71777214294589695
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define signext i32 @grev12_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev12_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 1044496
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    lui a2, 4080
-; RV64I-NEXT:    addiw a2, a2, 255
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev12_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 12
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 4
-  %shl1 = and i32 %and1, -252645136
-  %and1b = lshr i32 %a, 4
-  %shr1 = and i32 %and1b, 252645135
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 8
-  %shl2 = and i32 %and2, -16711936
-  %and2b = lshr i32 %or1, 8
-  %shr2 = and i32 %and2b, 16711935
-  %or2 = or i32 %shl2, %shr2
-  ret i32 %or2
-}
-
-define i64 @grev12_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev12_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI49_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI49_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI49_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI49_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI49_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI49_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI49_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI49_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev12_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 4
-  %shl1 = and i64 %and1, -1085102592571150096
-  %and1b = lshr i64 %a, 4
-  %shr1 = and i64 %and1b, 1085102592571150095
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 8
-  %shl2 = and i64 %and2, -71777214294589696
-  %and2b = lshr i64 %or1, 8
-  %shr2 = and i64 %and2b, 71777214294589695
-  %or2 = or i64 %shl2, %shr2
-  ret i64 %or2
-}
-
-define signext i32 @grev14_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev14_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 4
-; RV64I-NEXT:    lui a2, 986895
-; RV64I-NEXT:    addiw a2, a2, 240
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 1044496
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    lui a2, 4080
-; RV64I-NEXT:    addiw a2, a2, 255
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev14_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 14
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 2
-  %shl1 = and i32 %and1, -858993460
-  %and1b = lshr i32 %a, 2
-  %shr1 = and i32 %and1b, 858993459
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 4
-  %shl2 = and i32 %and2, -252645136
-  %and2b = lshr i32 %or1, 4
-  %shr2 = and i32 %and2b, 252645135
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 8
-  %shl3 = and i32 %and3, -16711936
-  %and3b = lshr i32 %or2, 8
-  %shr3 = and i32 %and3b, 16711935
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-define i64 @grev14_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev14_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI51_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI51_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI51_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI51_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI51_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI51_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI51_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI51_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI51_4)
-; RV64I-NEXT:    ld a1, %lo(.LCPI51_4)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI51_5)
-; RV64I-NEXT:    ld a2, %lo(.LCPI51_5)(a2)
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev14_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev2.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 2
-  %shl1 = and i64 %and1, -3689348814741910324
-  %and1b = lshr i64 %a, 2
-  %shr1 = and i64 %and1b, 3689348814741910323
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 4
-  %shl2 = and i64 %and2, -1085102592571150096
-  %and2b = lshr i64 %or1, 4
-  %shr2 = and i64 %and2b, 1085102592571150095
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 8
-  %shl3 = and i64 %and3, -71777214294589696
-  %and3b = lshr i64 %or2, 8
-  %shr3 = and i64 %and3b, 71777214294589695
-  %or3 = or i64 %shl3, %shr3
-  ret i64 %or3
-}
-
-define signext i32 @grev16_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev16_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a1, a0, 16
-; RV64I-NEXT:    slliw a0, a0, 16
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev16_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 16
-; RV64ZBP-NEXT:    ret
-  %shl = shl i32 %a, 16
-  %shr = lshr i32 %a, 16
-  %or = or i32 %shl, %shr
-  ret i32 %or
-}
-
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-declare i32 @llvm.fshr.i32(i32, i32, i32)
-
-define signext i32 @grev16_i32_fshl(i32 signext %a) nounwind {
-; RV64I-LABEL: grev16_i32_fshl:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a1, a0, 16
-; RV64I-NEXT:    slliw a0, a0, 16
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev16_i32_fshl:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 16
-; RV64ZBP-NEXT:    ret
-  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
-  ret i32 %or
-}
-
-define signext i32 @grev16_i32_fshr(i32 signext %a) nounwind {
-; RV64I-LABEL: grev16_i32_fshr:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 16
-; RV64I-NEXT:    srliw a0, a0, 16
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev16_i32_fshr:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    roriw a0, a0, 16
-; RV64ZBP-NEXT:    ret
-  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 16)
-  ret i32 %or
-}
-
-define i64 @grev16_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev16_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 16
-; RV64I-NEXT:    lui a2, 983041
-; RV64I-NEXT:    slli a3, a2, 4
-; RV64I-NEXT:    addi a3, a3, -1
-; RV64I-NEXT:    slli a3, a3, 16
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a0, a0, 16
-; RV64I-NEXT:    slli a2, a2, 20
-; RV64I-NEXT:    addi a2, a2, -1
-; RV64I-NEXT:    srli a2, a2, 16
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev16_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev16.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = shl i64 %a, 16
-  %shl = and i64 %and, -281470681808896
-  %and1 = lshr i64 %a, 16
-  %shr = and i64 %and1, 281470681808895
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define i64 @grev32(i64 %a) nounwind {
-; RV64I-LABEL: grev32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rori a0, a0, 32
-; RV64ZBP-NEXT:    ret
-  %shl = shl i64 %a, 32
-  %shr = lshr i64 %a, 32
-  %or = or i64 %shl, %shr
-  ret i64 %or
-}
-
-define signext i32 @grev3b_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev3b_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a2, 838861
-; RV64I-NEXT:    addiw a2, a2, -820
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev3b_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 3
-; RV64ZBP-NEXT:    ret
-  %and2 = shl i32 %a, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %a, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and1 = shl i32 %or2, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %or2, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  ret i32 %or1
-}
-
-define i64 @grev3b_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev3b_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI58_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI58_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI58_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI58_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI58_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI58_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI58_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI58_3)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev3b_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and2 = shl i64 %a, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %a, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and1 = shl i64 %or2, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %or2, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  ret i64 %or1
-}
-
-; grev1, grev2, grev1 -> grev2
-define signext i32 @grev2b_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev2b_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a4, 838861
-; RV64I-NEXT:    addiw a4, a4, -820
-; RV64I-NEXT:    and a1, a1, a4
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a4, 209715
-; RV64I-NEXT:    addiw a4, a4, 819
-; RV64I-NEXT:    and a0, a0, a4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev2b_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 2
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 1
-  %shl3 = and i32 %and3, -1431655766
-  %and3b = lshr i32 %or2, 1
-  %shr3 = and i32 %and3b, 1431655765
-  %or3 = or i32 %shl3, %shr3
-  ret i32 %or3
-}
-
-; grev1, grev2, grev1 -> grev2
-define i64 @grev2b_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev2b_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI60_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI60_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI60_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI60_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a3, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    lui a3, %hi(.LCPI60_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI60_2)(a3)
-; RV64I-NEXT:    lui a4, %hi(.LCPI60_3)
-; RV64I-NEXT:    ld a4, %lo(.LCPI60_3)(a4)
-; RV64I-NEXT:    slli a5, a0, 2
-; RV64I-NEXT:    and a3, a5, a3
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev2b_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev2.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 1
-  %shl3 = and i64 %and3, -6148914691236517206
-  %and3b = lshr i64 %or2, 1
-  %shr3 = and i64 %and3b, 6148914691236517205
-  %or3 = or i64 %shl3, %shr3
-  ret i64 %or3
-}
-
-; grev1, grev2, grev1, grev2 -> identity
-define signext i32 @grev0_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: grev0_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    lui a2, 699051
-; RV64I-NEXT:    addiw a2, a2, -1366
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    lui a4, 838861
-; RV64I-NEXT:    addiw a4, a4, -820
-; RV64I-NEXT:    and a1, a1, a4
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a5, 209715
-; RV64I-NEXT:    addiw a5, a5, 819
-; RV64I-NEXT:    and a0, a0, a5
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slliw a1, a0, 2
-; RV64I-NEXT:    and a1, a1, a4
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a5
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev0_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i32 %a, 1
-  %shl1 = and i32 %and1, -1431655766
-  %and1b = lshr i32 %a, 1
-  %shr1 = and i32 %and1b, 1431655765
-  %or1 = or i32 %shl1, %shr1
-  %and2 = shl i32 %or1, 2
-  %shl2 = and i32 %and2, -858993460
-  %and2b = lshr i32 %or1, 2
-  %shr2 = and i32 %and2b, 858993459
-  %or2 = or i32 %shl2, %shr2
-  %and3 = shl i32 %or2, 1
-  %shl3 = and i32 %and3, -1431655766
-  %and3b = lshr i32 %or2, 1
-  %shr3 = and i32 %and3b, 1431655765
-  %or3 = or i32 %shl3, %shr3
-  %and4 = shl i32 %or3, 2
-  %shl4 = and i32 %and4, -858993460
-  %and4b = lshr i32 %or3, 2
-  %shr4 = and i32 %and4b, 858993459
-  %or4 = or i32 %shl4, %shr4
-  ret i32 %or4
-}
-
-; grev1, grev2, grev1, grev2 -> identity
-define i64 @grev0_i64(i64 %a) nounwind {
-; RV64I-LABEL: grev0_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI62_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI62_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI62_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI62_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a3, a3, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    lui a3, %hi(.LCPI62_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI62_2)(a3)
-; RV64I-NEXT:    lui a4, %hi(.LCPI62_3)
-; RV64I-NEXT:    ld a4, %lo(.LCPI62_3)(a4)
-; RV64I-NEXT:    slli a5, a0, 2
-; RV64I-NEXT:    and a5, a5, a3
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a4
-; RV64I-NEXT:    or a0, a5, a0
-; RV64I-NEXT:    slli a5, a0, 1
-; RV64I-NEXT:    and a1, a5, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    slli a1, a0, 2
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev0_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    ret
-  %and1 = shl i64 %a, 1
-  %shl1 = and i64 %and1, -6148914691236517206
-  %and1b = lshr i64 %a, 1
-  %shr1 = and i64 %and1b, 6148914691236517205
-  %or1 = or i64 %shl1, %shr1
-  %and2 = shl i64 %or1, 2
-  %shl2 = and i64 %and2, -3689348814741910324
-  %and2b = lshr i64 %or1, 2
-  %shr2 = and i64 %and2b, 3689348814741910323
-  %or2 = or i64 %shl2, %shr2
-  %and3 = shl i64 %or2, 1
-  %shl3 = and i64 %and3, -6148914691236517206
-  %and3b = lshr i64 %or2, 1
-  %shr3 = and i64 %and3b, 6148914691236517205
-  %or3 = or i64 %shl3, %shr3
-  %and4 = shl i64 %or3, 2
-  %shl4 = and i64 %and4, -3689348814741910324
-  %and4b = lshr i64 %or3, 2
-  %shr4 = and i64 %and4b, 3689348814741910323
-  %or4 = or i64 %shl4, %shr4
-  ret i64 %or4
-}
-
-declare i64 @llvm.fshl.i64(i64, i64, i64)
-declare i64 @llvm.fshr.i64(i64, i64, i64)
-
-define i64 @grev32_fshl(i64 %a) nounwind {
-; RV64I-LABEL: grev32_fshl:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev32_fshl:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rori a0, a0, 32
-; RV64ZBP-NEXT:    ret
-  %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 32)
-  ret i64 %or
-}
-
-define i64 @grev32_fshr(i64 %a) nounwind {
-; RV64I-LABEL: grev32_fshr:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: grev32_fshr:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rori a0, a0, 32
-; RV64ZBP-NEXT:    ret
-  %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 32)
-  ret i64 %or
-}
-
-declare i16 @llvm.bswap.i16(i16)
-
-define zeroext i16 @bswap_i16(i16 zeroext %a) nounwind {
-; RV64I-LABEL: bswap_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 8
-; RV64I-NEXT:    slli a0, a0, 8
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slli a0, a0, 48
-; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i16 @llvm.bswap.i16(i16 %a)
-  ret i16 %1
-}
-
-declare i32 @llvm.bswap.i32(i32)
-
-define signext i32 @bswap_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: bswap_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 8
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srliw a2, a0, 24
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    slliw a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 24
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
-  ret i32 %1
-}
-
-; Similar to bswap_i32 but the result is not sign extended.
-define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
-; RV64I-LABEL: bswap_i32_nosext:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    lui a3, 16
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srliw a3, a0, 24
-; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    lui a4, 4080
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    sw a0, 0(a1)
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_i32_nosext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 24
-; RV64ZBP-NEXT:    sw a0, 0(a1)
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
-  store i32 %1, i32* %x
-  ret void
-}
-
-declare i64 @llvm.bswap.i64(i64)
-
-define i64 @bswap_i64(i64 %a) {
-; RV64I-LABEL: bswap_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 24
-; RV64I-NEXT:    li a2, 255
-; RV64I-NEXT:    slli a3, a2, 40
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srliw a3, a0, 24
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    or a1, a1, a3
-; RV64I-NEXT:    slli a3, a0, 40
-; RV64I-NEXT:    slli a2, a2, 48
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    slli a3, a0, 56
-; RV64I-NEXT:    or a2, a3, a2
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a2, a0, 40
-; RV64I-NEXT:    lui a3, 16
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 56
-; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 24
-; RV64I-NEXT:    lui a4, 4080
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    srliw a0, a0, 24
-; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8 a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bswap.i64(i64 %a)
-  ret i64 %1
-}
-
-declare i8 @llvm.bitreverse.i8(i8)
-
-define zeroext i8 @bitreverse_i8(i8 zeroext %a) nounwind {
-; RV64I-LABEL: bitreverse_i8:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    andi a0, a0, 15
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    andi a1, a0, 51
-; RV64I-NEXT:    slli a1, a1, 2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    andi a0, a0, 51
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    andi a1, a0, 85
-; RV64I-NEXT:    slli a1, a1, 1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    andi a0, a0, 85
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_i8:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i8 @llvm.bitreverse.i8(i8 %a)
-  ret i8 %1
-}
-
-declare i16 @llvm.bitreverse.i16(i16)
-
-define zeroext i16 @bitreverse_i16(i16 zeroext %a) nounwind {
-; RV64I-LABEL: bitreverse_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 8
-; RV64I-NEXT:    slli a0, a0, 8
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 1
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 3
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 5
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_i16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i16 @llvm.bitreverse.i16(i16 %a)
-  ret i16 %1
-}
-
-declare i32 @llvm.bitreverse.i32(i32)
-
-define signext i32 @bitreverse_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: bitreverse_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 8
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srliw a2, a0, 24
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    slliw a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 31
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.bitreverse.i32(i32 %a)
-  ret i32 %1
-}
-
-; Similar to bitreverse_i32 but the result is not sign extended.
-define void @bitreverse_i32_nosext(i32 signext %a, i32* %x) nounwind {
-; RV64I-LABEL: bitreverse_i32_nosext:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    lui a3, 16
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srliw a3, a0, 24
-; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    slli a3, a0, 8
-; RV64I-NEXT:    lui a4, 4080
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    slliw a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    addiw a3, a3, -241
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    slliw a0, a0, 4
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    sw a0, 0(a1)
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_i32_nosext:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 31
-; RV64ZBP-NEXT:    sw a0, 0(a1)
-; RV64ZBP-NEXT:    ret
-  %1 = tail call i32 @llvm.bitreverse.i32(i32 %a)
-  store i32 %1, i32* %x
-  ret void
-}
-
-declare i64 @llvm.bitreverse.i64(i64)
-
-define i64 @bitreverse_i64(i64 %a) nounwind {
-; RV64I-LABEL: bitreverse_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 24
-; RV64I-NEXT:    li a2, 255
-; RV64I-NEXT:    slli a3, a2, 40
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srliw a3, a0, 24
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    or a1, a1, a3
-; RV64I-NEXT:    slli a3, a0, 40
-; RV64I-NEXT:    slli a2, a2, 48
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    slli a3, a0, 56
-; RV64I-NEXT:    or a2, a3, a2
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a2, a0, 40
-; RV64I-NEXT:    lui a3, 16
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 56
-; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 24
-; RV64I-NEXT:    lui a4, 4080
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    srliw a0, a0, 24
-; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    lui a3, %hi(.LCPI73_0)
-; RV64I-NEXT:    ld a3, %lo(.LCPI73_0)(a3)
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    lui a2, %hi(.LCPI73_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI73_1)(a2)
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI73_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI73_2)(a2)
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bitreverse.i64(i64 %a)
-  ret i64 %1
-}
-
-define i32 @bswap_rotr_i32(i32 %a) {
-; RV64I-LABEL: bswap_rotr_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 8
-; RV64I-NEXT:    lui a2, 4080
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 24
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srliw a2, a0, 24
-; RV64I-NEXT:    srli a0, a0, 16
-; RV64I-NEXT:    slli a0, a0, 8
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 16
-; RV64I-NEXT:    srliw a1, a1, 16
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_rotr_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 8
-; RV64ZBP-NEXT:    ret
-  %1 = call i32 @llvm.bswap.i32(i32 %a)
-  %2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16)
-  ret i32 %2
-}
-
-define i32 @bswap_rotl_i32(i32 %a) {
-; RV64I-LABEL: bswap_rotl_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a1, a0, 24
-; RV64I-NEXT:    srli a2, a0, 16
-; RV64I-NEXT:    slli a2, a2, 8
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    srliw a0, a0, 16
-; RV64I-NEXT:    slliw a1, a1, 16
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_rotl_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    greviw a0, a0, 8
-; RV64ZBP-NEXT:    ret
-  %1 = call i32 @llvm.bswap.i32(i32 %a)
-  %2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16)
-  ret i32 %2
-}
-
-define i64 @bswap_rotr_i64(i64 %a) {
-; RV64I-LABEL: bswap_rotr_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 24
-; RV64I-NEXT:    li a2, 255
-; RV64I-NEXT:    slli a3, a2, 40
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srliw a3, a0, 24
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    or a1, a1, a3
-; RV64I-NEXT:    slli a3, a0, 40
-; RV64I-NEXT:    slli a2, a2, 48
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    slli a3, a0, 56
-; RV64I-NEXT:    or a2, a3, a2
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a2, a0, 40
-; RV64I-NEXT:    lui a3, 16
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 56
-; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 24
-; RV64I-NEXT:    lui a4, 4080
-; RV64I-NEXT:    and a3, a3, a4
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_rotr_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bswap.i64(i64 %a)
-  %2 = call i64 @llvm.fshr.i64(i64 %1, i64 %1, i64 32)
-  ret i64 %2
-}
-
-define i64 @bswap_rotl_i64(i64 %a) {
-; RV64I-LABEL: bswap_rotl_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 40
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 56
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 24
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srli a3, a0, 32
-; RV64I-NEXT:    slli a3, a3, 24
-; RV64I-NEXT:    or a2, a3, a2
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    slli a2, a0, 24
-; RV64I-NEXT:    li a3, 255
-; RV64I-NEXT:    slli a4, a3, 40
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    srliw a4, a0, 24
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    or a2, a2, a4
-; RV64I-NEXT:    slli a4, a0, 40
-; RV64I-NEXT:    slli a3, a3, 48
-; RV64I-NEXT:    and a3, a4, a3
-; RV64I-NEXT:    slli a0, a0, 56
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bswap_rotl_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bswap.i64(i64 %a)
-  %2 = call i64 @llvm.fshl.i64(i64 %1, i64 %1, i64 32)
-  ret i64 %2
-}
-
-define i32 @bitreverse_bswap_i32(i32 %a) {
-; RV64I-LABEL: bitreverse_bswap_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_bswap_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = call i32 @llvm.bitreverse.i32(i32 %a)
-  %2 = call i32 @llvm.bswap.i32(i32 %1)
-  ret i32 %2
-}
-
-define i64 @bitreverse_bswap_i64(i64 %a) {
-; RV64I-LABEL: bitreverse_bswap_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI79_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI79_0)(a1)
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI79_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI79_1)(a1)
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI79_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI79_2)(a1)
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: bitreverse_bswap_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    rev.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %1 = call i64 @llvm.bitreverse.i64(i64 %a)
-  %2 = call i64 @llvm.bswap.i64(i64 %1)
-  ret i64 %2
-}
-
-define signext i32 @shfl1_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: shfl1_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 629146
-; RV64I-NEXT:    addiw a1, a1, -1639
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    slli a2, a0, 1
-; RV64I-NEXT:    lui a3, 279620
-; RV64I-NEXT:    addiw a3, a3, 1092
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    lui a2, 139810
-; RV64I-NEXT:    addiw a2, a2, 546
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl1_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, -1717986919
-  %shl = shl i32 %a, 1
-  %and1 = and i32 %shl, 1145324612
-  %or = or i32 %and1, %and
-  %shr = lshr i32 %a, 1
-  %and2 = and i32 %shr, 572662306
-  %or3 = or i32 %or, %and2
-  ret i32 %or3
-}
-
-define i64 @shfl1_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: shfl1_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI81_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI81_1)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI81_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI81_0)(a2)
-; RV64I-NEXT:    slli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    lui a3, %hi(.LCPI81_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI81_2)(a3)
-; RV64I-NEXT:    and a2, a0, a2
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl1_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip.n a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, -7378697629483820647
-  %shl = shl i64 %a, 1
-  %and1 = and i64 %shl, 4919131752989213764
-  %or = or i64 %and, %and1
-  %shr = lshr i64 %a, 1
-  %and2 = and i64 %shr, 2459565876494606882
-  %or3 = or i64 %or, %and2
-  ret i64 %or3
-}
-
-define signext i32 @shfl2_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: shfl2_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 801852
-; RV64I-NEXT:    addiw a1, a1, 963
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    slli a2, a0, 2
-; RV64I-NEXT:    lui a3, 197379
-; RV64I-NEXT:    addiw a3, a3, 48
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    lui a2, 49345
-; RV64I-NEXT:    addiw a2, a2, -1012
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl2_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, -1010580541
-  %shl = shl i32 %a, 2
-  %and1 = and i32 %shl, 808464432
-  %or = or i32 %and1, %and
-  %shr = lshr i32 %a, 2
-  %and2 = and i32 %shr, 202116108
-  %or3 = or i32 %and2, %or
-  ret i32 %or3
-}
-
-define i64 @shfl2_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: shfl2_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI83_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI83_1)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI83_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI83_0)(a2)
-; RV64I-NEXT:    slli a3, a0, 2
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    lui a3, %hi(.LCPI83_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI83_2)(a3)
-; RV64I-NEXT:    and a2, a0, a2
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl2_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip2.b a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, -4340410370284600381
-  %shl = shl i64 %a, 2
-  %and1 = and i64 %shl, 3472328296227680304
-  %or = or i64 %and, %and1
-  %shr = lshr i64 %a, 2
-  %and2 = and i64 %shr, 868082074056920076
-  %or3 = or i64 %and2, %or
-  ret i64 %or3
-}
-
-define signext i32 @shfl4_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: shfl4_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 983295
-; RV64I-NEXT:    addiw a1, a1, 15
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    slli a2, a0, 4
-; RV64I-NEXT:    lui a3, 61441
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    lui a3, 3840
-; RV64I-NEXT:    addiw a3, a3, 240
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl4_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, -267390961
-  %shl = shl i32 %a, 4
-  %and1 = and i32 %shl, 251662080
-  %shr = lshr i32 %a, 4
-  %and2 = and i32 %shr, 15728880
-  %or = or i32 %and2, %and
-  %or3 = or i32 %or, %and1
-  ret i32 %or3
-}
-
-define i64 @shfl4_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: shfl4_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI85_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI85_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI85_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI85_1)(a2)
-; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    lui a4, %hi(.LCPI85_2)
-; RV64I-NEXT:    ld a4, %lo(.LCPI85_2)(a4)
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    srli a0, a0, 4
-; RV64I-NEXT:    and a0, a0, a4
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl4_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip4.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, -1148435428713435121
-  %shl = shl i64 %a, 4
-  %and1 = and i64 %shl, 1080880403494997760
-  %shr = lshr i64 %a, 4
-  %and2 = and i64 %shr, 67555025218437360
-  %or = or i64 %and1, %and2
-  %or3 = or i64 %or, %and
-  ret i64 %or3
-}
-
-define signext i32 @shfl8_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: shfl8_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1044480
-; RV64I-NEXT:    addiw a1, a1, 255
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    lui a3, 16
-; RV64I-NEXT:    addiw a3, a3, -256
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl8_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, -16776961
-  %shl = shl i32 %a, 8
-  %and1 = and i32 %shl, 16711680
-  %shr = lshr i32 %a, 8
-  %and2 = and i32 %shr, 65280
-  %or = or i32 %and, %and2
-  %or3 = or i32 %or, %and1
-  ret i32 %or3
-}
-
-define i64 @shfl8_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: shfl8_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 983041
-; RV64I-NEXT:    slli a1, a1, 4
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    slli a1, a1, 24
-; RV64I-NEXT:    addi a1, a1, 255
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    li a3, 255
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    addi a3, a3, 255
-; RV64I-NEXT:    slli a4, a3, 16
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    srli a0, a0, 8
-; RV64I-NEXT:    slli a3, a3, 8
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl8_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip8.w a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, -72056494543077121
-  %shl = shl i64 %a, 8
-  %and1 = and i64 %shl, 71776119077928960
-  %shr = lshr i64 %a, 8
-  %and2 = and i64 %shr, 280375465148160
-  %or = or i64 %and2, %and
-  %or3 = or i64 %and1, %or
-  ret i64 %or3
-}
-
-define i64 @shfl16(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: shfl16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    addi a1, a1, 1
-; RV64I-NEXT:    slli a1, a1, 16
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    srliw a2, a0, 16
-; RV64I-NEXT:    slli a2, a2, 32
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a0, a0, 16
-; RV64I-NEXT:    srliw a0, a0, 16
-; RV64I-NEXT:    slli a0, a0, 16
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: shfl16:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zip16 a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, -281474976645121
-  %shl = shl i64 %a, 16
-  %and1 = and i64 %shl, 281470681743360
-  %or = or i64 %and1, %and
-  %shr = lshr i64 %a, 16
-  %and2 = and i64 %shr, 4294901760
-  %or3 = or i64 %or, %and2
-  ret i64 %or3
-}
-
-define signext i32 @packu_i32(i32 signext %a, i32 signext %b) nounwind {
-; RV64I-LABEL: packu_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a0, a0, 16
-; RV64I-NEXT:    lui a2, 1048560
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: packu_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packuw a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %shr = lshr i32 %a, 16
-  %shr1 = and i32 %b, -65536
-  %or = or i32 %shr1, %shr
-  ret i32 %or
-}
-
-define i64 @packu_i64(i64 %a, i64 %b) nounwind {
-; RV64I-LABEL: packu_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: packu_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    packu a0, a0, a1
-; RV64ZBP-NEXT:    ret
-  %shr = lshr i64 %a, 32
-  %shr1 = and i64 %b, -4294967296
-  %or = or i64 %shr1, %shr
-  ret i64 %or
-}
-
-define i32 @zexth_i32(i32 %a) nounwind {
-; RV64I-LABEL: zexth_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 48
-; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: zexth_i32:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zext.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i32 %a, 65535
-  ret i32 %and
-}
-
-define i64 @zexth_i64(i64 %a) nounwind {
-; RV64I-LABEL: zexth_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 48
-; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    ret
-;
-; RV64ZBP-LABEL: zexth_i64:
-; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    zext.h a0, a0
-; RV64ZBP-NEXT:    ret
-  %and = and i64 %a, 65535
-  ret i64 %and
-}

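For context on the checks deleted above: rv64zbp.ll matched shift-and-mask IR
sequences against the draft extension's generalized reverse (grevi, with the
rev* spellings as aliases) and shuffle (shfli, aliased as zip*). The following
C sketch reconstructs the 32-bit reference semantics from the mask constants
in the deleted tests; the 64-bit variants add one more stage each, and the
function names here are illustrative, not from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* grev: for each set bit k of shamt, swap adjacent 2^k-bit blocks.
       Stages commute and each is an involution, so composing grevs XORs
       their shift amounts -- which is what the "grev1, grev2, grev1 ->
       grev2" and "-> identity" tests above rely on. */
    static uint32_t grev32(uint32_t x, unsigned shamt) {
      if (shamt &  1) x = ((x & 0x55555555u) <<  1) | ((x & 0xAAAAAAAAu) >>  1);
      if (shamt &  2) x = ((x & 0x33333333u) <<  2) | ((x & 0xCCCCCCCCu) >>  2);
      if (shamt &  4) x = ((x & 0x0F0F0F0Fu) <<  4) | ((x & 0xF0F0F0F0u) >>  4);
      if (shamt &  8) x = ((x & 0x00FF00FFu) <<  8) | ((x & 0xFF00FF00u) >>  8);
      if (shamt & 16) x = ((x & 0x0000FFFFu) << 16) | ((x & 0xFFFF0000u) >> 16);
      return x;
    }

    /* shfl: staged bit interleave. Bits outside maskL/maskR stay put;
       the masks match the constants in the shfl*_i32 tests above. */
    static uint32_t shfl_stage(uint32_t x, uint32_t maskL, uint32_t maskR,
                               int n) {
      return (x & ~(maskL | maskR)) | ((x << n) & maskL) | ((x >> n) & maskR);
    }

    static uint32_t shfl32(uint32_t x, unsigned shamt) {
      if (shamt & 8) x = shfl_stage(x, 0x00FF0000u, 0x0000FF00u, 8);
      if (shamt & 4) x = shfl_stage(x, 0x0F000F00u, 0x00F000F0u, 4);
      if (shamt & 2) x = shfl_stage(x, 0x30303030u, 0x0C0C0C0Cu, 2);
      if (shamt & 1) x = shfl_stage(x, 0x44444444u, 0x22222222u, 1);
      return x;
    }

    int main(void) {
      /* grev(x, 24) is bswap, grev(x, 31) is bitreverse, grev(x, 16) is
         a 16-bit rotate: the cases the RV64ZBP checks fold to rev8, rev
         and roriw. */
      printf("%08x\n", (unsigned)grev32(0x12345678u, 24)); /* 78563412 */
      /* shfl(x, 15) fully interleaves the two halfwords (zip). */
      printf("%08x\n", (unsigned)shfl32(0x0000FFFFu, 15)); /* 55555555 */
      return 0;
    }
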
diff  --git a/llvm/test/MC/RISCV/attribute-arch-invalid.s b/llvm/test/MC/RISCV/attribute-arch-invalid.s
index 75ab25733270..f6ad5ff77966 100644
--- a/llvm/test/MC/RISCV/attribute-arch-invalid.s
+++ b/llvm/test/MC/RISCV/attribute-arch-invalid.s
@@ -14,14 +14,8 @@
 .attribute arch, "rv32izbm"
 # CHECK: error: invalid arch name 'rv32izbm', experimental extension requires explicit version number `zbm`
 
-.attribute arch, "rv32izbp"
-# CHECK: error: invalid arch name 'rv32izbp', experimental extension requires explicit version number `zbp`
-
 .attribute arch, "rv32izbr"
 # CHECK: error: invalid arch name 'rv32izbr', experimental extension requires explicit version number `zbr`
 
-.attribute arch, "rv32izbt"
-# CHECK: error: invalid arch name 'rv32izbt', experimental extension requires explicit version number `zbt`
-
 .attribute arch, "rv32izvfh"
 # CHECK: error: invalid arch name 'rv32izvfh', experimental extension requires explicit version number `zvfh`

diff  --git a/llvm/test/MC/RISCV/attribute-arch.s b/llvm/test/MC/RISCV/attribute-arch.s
index f6d0d7c66aec..ade362c1192d 100644
--- a/llvm/test/MC/RISCV/attribute-arch.s
+++ b/llvm/test/MC/RISCV/attribute-arch.s
@@ -116,9 +116,6 @@
 .attribute arch, "rv32izbm0p93"
 # CHECK: attribute      5, "rv32i2p0_zbm0p93"
 
-.attribute arch, "rv32izbp0p93"
-# CHECK: attribute      5, "rv32i2p0_zbp0p93"
-
 .attribute arch, "rv32izbr0p93"
 # CHECK: attribute      5, "rv32i2p0_zbr0p93"
 

diff  --git a/llvm/test/MC/RISCV/rv32zbb-invalid.s b/llvm/test/MC/RISCV/rv32zbb-invalid.s
index d4611d6dda44..c23593a106b0 100644
--- a/llvm/test/MC/RISCV/rv32zbb-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zbb-invalid.s
@@ -21,3 +21,22 @@ maxu t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
 clzw t0, t1 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
 ctzw t0, t1 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
 cpopw t0, t1 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
+# Too few operands
+andn t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+orn t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+xnor t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+rol t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+ror t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+rori t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Immediate operand out of range
+rori t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
+rori t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
+rolw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
+rorw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
+roriw t0, t1, 31 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
+roriw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}

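The rori checks added above pin down the RV32 immediate: the rotate amount is
a 5-bit field, so anything outside [0, 31] is rejected at parse time. As a
plain-C sketch of the operation that immediate feeds (names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Rotate right; the mask mirrors rori's 5-bit immediate on RV32. */
    static uint32_t ror32(uint32_t x, unsigned n) {
      n &= 31;
      return (x >> n) | (x << ((32 - n) & 31));
    }

    int main(void) {
      printf("%08x\n", (unsigned)ror32(0x80000001u, 1)); /* c0000000 */
      return 0;
    }
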
diff  --git a/llvm/test/MC/RISCV/rv32zbbp-only-valid.s b/llvm/test/MC/RISCV/rv32zbb-only-valid.s
similarity index 59%
rename from llvm/test/MC/RISCV/rv32zbbp-only-valid.s
rename to llvm/test/MC/RISCV/rv32zbb-only-valid.s
index 4453d530ca63..13afa20cde4a 100644
--- a/llvm/test/MC/RISCV/rv32zbbp-only-valid.s
+++ b/llvm/test/MC/RISCV/rv32zbb-only-valid.s
@@ -5,13 +5,6 @@
 # RUN:     | llvm-objdump --mattr=+zbb -M no-aliases -d -r - \
 # RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
 
-# With Bitmanip permutation extension:
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-
 # CHECK-ASM-AND-OBJ: zext.h t0, t1
 # CHECK-ASM: encoding: [0xb3,0x42,0x03,0x08]
 zext.h t0, t1

diff  --git a/llvm/test/MC/RISCV/rv32zbb-valid.s b/llvm/test/MC/RISCV/rv32zbb-valid.s
index 1fb72620d6a5..b9eef05c7e43 100644
--- a/llvm/test/MC/RISCV/rv32zbb-valid.s
+++ b/llvm/test/MC/RISCV/rv32zbb-valid.s
@@ -37,3 +37,28 @@ max t0, t1, t2
 # CHECK-ASM-AND-OBJ: maxu t0, t1, t2
 # CHECK-ASM: encoding: [0xb3,0x72,0x73,0x0a]
 maxu t0, t1, t2
+
+# CHECK-ASM-AND-OBJ: andn t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x72,0x73,0x40]
+andn t0, t1, t2
+# CHECK-ASM-AND-OBJ: orn t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x62,0x73,0x40]
+orn t0, t1, t2
+# CHECK-ASM-AND-OBJ: xnor t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x40]
+xnor t0, t1, t2
+# CHECK-ASM-AND-OBJ: rol t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x60]
+rol t0, t1, t2
+# CHECK-ASM-AND-OBJ: ror t0, t1, t2
+# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x60]
+ror t0, t1, t2
+# CHECK-ASM-AND-OBJ: rori t0, t1, 31
+# CHECK-ASM: encoding: [0x93,0x52,0xf3,0x61]
+rori t0, t1, 31
+# CHECK-ASM-AND-OBJ: rori t0, t1, 0
+# CHECK-ASM: encoding: [0x93,0x52,0x03,0x60]
+rori t0, t1, 0
+# CHECK-ASM-AND-OBJ: orc.b t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
+orc.b t0, t1

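The CHECK-ASM byte sequences above can be reproduced by hand, since every
register-register Zbb instruction is a plain R-type encoding. A small C
sketch, using the field values for the andn test above (not part of the
patch):

    #include <stdint.h>
    #include <stdio.h>

    /* R-type layout: funct7 | rs2 | rs1 | funct3 | rd | opcode. */
    static uint32_t rtype(uint32_t funct7, uint32_t rs2, uint32_t rs1,
                          uint32_t funct3, uint32_t rd, uint32_t opcode) {
      return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) |
             (funct3 << 12) | (rd << 7) | opcode;
    }

    int main(void) {
      /* andn t0, t1, t2: rd=x5, rs1=x6, rs2=x7, funct3=0b111 (AND),
         funct7=0b0100000, opcode=0x33 (OP). In little-endian memory
         order, 0x407372b3 is exactly the [0xb3,0x72,0x73,0x40] byte
         sequence the test expects. */
      printf("%08x\n", (unsigned)rtype(0x20, 7, 6, 7, 5, 0x33));
      return 0;
    }
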
diff  --git a/llvm/test/MC/RISCV/rv32zbbp-invalid.s b/llvm/test/MC/RISCV/rv32zbbp-invalid.s
deleted file mode 100644
index 4493054c41f6..000000000000
--- a/llvm/test/MC/RISCV/rv32zbbp-invalid.s
+++ /dev/null
@@ -1,21 +0,0 @@
-# RUN: not llvm-mc -triple riscv32 -mattr=+zbb,+experimental-zbp < %s 2>&1 | FileCheck %s
-
-# Too few operands
-andn t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-orn t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-xnor t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-rol t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-ror t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-rori t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-rori t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
-rori t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31]
-rolw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-rorw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-roriw t0, t1, 31 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-roriw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}

diff  --git a/llvm/test/MC/RISCV/rv32zbbp-valid.s b/llvm/test/MC/RISCV/rv32zbbp-valid.s
deleted file mode 100644
index a5357dd01634..000000000000
--- a/llvm/test/MC/RISCV/rv32zbbp-valid.s
+++ /dev/null
@@ -1,48 +0,0 @@
-# With Bitmanip base extension:
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+zbb -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+zbb -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zbb < %s \
-# RUN:     | llvm-objdump --mattr=+zbb -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zbb < %s \
-# RUN:     | llvm-objdump --mattr=+zbb -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-
-# With Bitmanip permutation extension:
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-
-# CHECK-ASM-AND-OBJ: andn t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x72,0x73,0x40]
-andn t0, t1, t2
-# CHECK-ASM-AND-OBJ: orn t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x62,0x73,0x40]
-orn t0, t1, t2
-# CHECK-ASM-AND-OBJ: xnor t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x40]
-xnor t0, t1, t2
-# CHECK-ASM-AND-OBJ: rol t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x60]
-rol t0, t1, t2
-# CHECK-ASM-AND-OBJ: ror t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x60]
-ror t0, t1, t2
-# CHECK-ASM-AND-OBJ: rori t0, t1, 31
-# CHECK-ASM: encoding: [0x93,0x52,0xf3,0x61]
-rori t0, t1, 31
-# CHECK-ASM-AND-OBJ: rori t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x60]
-rori t0, t1, 0
-# CHECK-ASM-AND-OBJ: orc.b t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
-orc.b t0, t1

diff  --git a/llvm/test/MC/RISCV/rv32zbkx-invalid.s b/llvm/test/MC/RISCV/rv32zbkx-invalid.s
index 126a7597180b..6c2cafd33529 100644
--- a/llvm/test/MC/RISCV/rv32zbkx-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zbkx-invalid.s
@@ -4,6 +4,3 @@
 xperm8 t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
 # Too few operands
 xperm4 t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-
-# Undefined Zbp instruction in Zbkx
-xperm.h t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: 'Zbp' (Permutation 'Zb' Instructions){{$}}

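With the xperm.h check removed (its Zbp gating no longer exists),
rv32zbkx-invalid.s covers only the two crossbar permutations Zbkx actually
ratified, xperm8 and xperm4. A rough C model of xperm8 on RV32, assuming the
ratified semantics (rs1 supplies the lookup table, rs2 the indices, and
out-of-range indices produce zero bytes):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t xperm8_rv32(uint32_t rs1, uint32_t rs2) {
      uint32_t result = 0;
      for (int i = 0; i < 32; i += 8) {
        uint32_t idx = (rs2 >> i) & 0xFF;
        /* Only 4 table bytes exist on RV32; larger indices read as 0. */
        uint32_t byte = (idx < 4) ? (rs1 >> (idx * 8)) & 0xFF : 0;
        result |= byte << i;
      }
      return result;
    }

    int main(void) {
      /* Index vector 03,02,01,00 byte-reverses the table. */
      printf("%08x\n", (unsigned)xperm8_rv32(0x67452301u, 0x00010203u));
      /* prints 01234567 */
      return 0;
    }
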
diff  --git a/llvm/test/MC/RISCV/rv32zbp-aliases-valid.s b/llvm/test/MC/RISCV/rv32zbp-aliases-valid.s
deleted file mode 100644
index 6472394cee26..000000000000
--- a/llvm/test/MC/RISCV/rv32zbp-aliases-valid.s
+++ /dev/null
@@ -1,232 +0,0 @@
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbp -riscv-no-aliases \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ-NOALIAS %s
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbp \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump -d -r -M no-aliases --mattr=+experimental-zbp - \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ-NOALIAS %s
-# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump -d -r --mattr=+experimental-zbp - \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ %s
-
-# The following check prefixes are used in this test:
-# CHECK-S-OBJ            Match both the .s and objdumped object output with
-#                        aliases enabled
-# CHECK-S-OBJ-NOALIAS    Match both the .s and objdumped object output with
-#                        aliases disabled
-
-# CHECK-S-OBJ-NOALIAS: zext.h t0, t1
-# CHECK-S-OBJ: zext.h t0, t1
-zext.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 1
-# CHECK-S-OBJ: rev.p t0, t1
-rev.p x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 2
-# CHECK-S-OBJ: rev2.n t0, t1
-rev2.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 3
-# CHECK-S-OBJ: rev.n t0, t1
-rev.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 4
-# CHECK-S-OBJ: rev4.b t0, t1
-rev4.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 6
-# CHECK-S-OBJ: rev2.b t0, t1
-rev2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: brev8 t0, t1
-# CHECK-S-OBJ: rev.b t0, t1
-rev.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 8
-# CHECK-S-OBJ: rev8.h t0, t1
-rev8.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 12
-# CHECK-S-OBJ: rev4.h t0, t1
-rev4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 14
-# CHECK-S-OBJ: rev2.h t0, t1
-rev2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 15
-# CHECK-S-OBJ: rev.h t0, t1
-rev.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 16
-# CHECK-S-OBJ: rev16 t0, t1
-rev16 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: rev8 t0, t1
-# CHECK-S-OBJ: rev8 t0, t1
-rev8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 28
-# CHECK-S-OBJ: rev4 t0, t1
-rev4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 30
-# CHECK-S-OBJ: rev2 t0, t1
-rev2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 31
-# CHECK-S-OBJ: rev t0, t1
-rev x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 1
-# CHECK-S-OBJ: zip.n t0, t1
-zip.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 1
-# CHECK-S-OBJ: unzip.n t0, t1
-unzip.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 2
-# CHECK-S-OBJ: zip2.b t0, t1
-zip2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 2
-# CHECK-S-OBJ: unzip2.b t0, t1
-unzip2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 3
-# CHECK-S-OBJ: zip.b t0, t1
-zip.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 3
-# CHECK-S-OBJ: unzip.b t0, t1
-unzip.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 4
-# CHECK-S-OBJ: zip4.h t0, t1
-zip4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 4
-# CHECK-S-OBJ: unzip4.h t0, t1
-unzip4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 6
-# CHECK-S-OBJ: zip2.h t0, t1
-zip2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 6
-# CHECK-S-OBJ: unzip2.h t0, t1
-unzip2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 7
-# CHECK-S-OBJ: zip.h t0, t1
-zip.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 7
-# CHECK-S-OBJ: unzip.h t0, t1
-unzip.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 8
-# CHECK-S-OBJ: zip8 t0, t1
-zip8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 8
-# CHECK-S-OBJ: unzip8 t0, t1
-unzip8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 12
-# CHECK-S-OBJ: zip4 t0, t1
-zip4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 12
-# CHECK-S-OBJ: unzip4 t0, t1
-unzip4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 14
-# CHECK-S-OBJ: zip2 t0, t1
-zip2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 14
-# CHECK-S-OBJ: unzip2 t0, t1
-unzip2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 1
-# CHECK-S-OBJ: orc.p t0, t1
-orc.p x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 2
-# CHECK-S-OBJ: orc2.n t0, t1
-orc2.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 3
-# CHECK-S-OBJ: orc.n t0, t1
-orc.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 4
-# CHECK-S-OBJ: orc4.b t0, t1
-orc4.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 6
-# CHECK-S-OBJ: orc2.b t0, t1
-orc2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: orc.b t0, t1
-# CHECK-S-OBJ: orc.b t0, t1
-orc.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 8
-# CHECK-S-OBJ: orc8.h t0, t1
-orc8.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 12
-# CHECK-S-OBJ: orc4.h t0, t1
-orc4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 14
-# CHECK-S-OBJ: orc2.h t0, t1
-orc2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 15
-# CHECK-S-OBJ: orc.h t0, t1
-orc.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 16
-# CHECK-S-OBJ: orc16 t0, t1
-orc16 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 24
-# CHECK-S-OBJ: orc8 t0, t1
-orc8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 28
-# CHECK-S-OBJ: orc4 t0, t1
-orc4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 30
-# CHECK-S-OBJ: orc2 t0, t1
-orc2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 31
-# CHECK-S-OBJ: orc t0, t1
-orc x5, x6
-
-# CHECK-S-OBJ-NOALIAS: rori t0, t1, 8
-# CHECK-S-OBJ: rori t0, t1, 8
-ror x5, x6, 8
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 13
-# CHECK-S-OBJ: grevi t0, t1, 13
-grev x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 13
-# CHECK-S-OBJ: gorci t0, t1, 13
-gorc x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 13
-# CHECK-S-OBJ: shfli t0, t1, 13
-shfl x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 13
-# CHECK-S-OBJ: unshfli t0, t1, 13
-unshfl x5, x6, 13

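Every rev* alias in the file above was a single instance of one underlying instruction: grevi drives a butterfly network in which immediate bit k swaps adjacent 2^k-bit blocks. A minimal C sketch of the RV32 semantics, our transcription of the draft spec:

    #include <stdint.h>

    /* Generalized reverse, RV32. Bit k of shamt swaps adjacent 2^k-bit
       blocks: shamt 24 (= 8|16) swaps bytes and halfwords, i.e. rev8,
       and shamt 31 reverses all 32 bits, i.e. rev. */
    static uint32_t grev32(uint32_t x, uint32_t shamt) {
      if (shamt & 1)  x = ((x & 0x55555555) << 1)  | ((x & 0xAAAAAAAA) >> 1);
      if (shamt & 2)  x = ((x & 0x33333333) << 2)  | ((x & 0xCCCCCCCC) >> 2);
      if (shamt & 4)  x = ((x & 0x0F0F0F0F) << 4)  | ((x & 0xF0F0F0F0) >> 4);
      if (shamt & 8)  x = ((x & 0x00FF00FF) << 8)  | ((x & 0xFF00FF00) >> 8);
      if (shamt & 16) x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
      return x;
    }

Of all these points, only two were ratified and so survive this patch: rev8 (Zbb) and brev8, i.e. grevi 7, which the test above already printed as the Zbkb instruction.
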
diff  --git a/llvm/test/MC/RISCV/rv32zbp-invalid.s b/llvm/test/MC/RISCV/rv32zbp-invalid.s
deleted file mode 100644
index 543a724a8ca3..000000000000
--- a/llvm/test/MC/RISCV/rv32zbp-invalid.s
+++ /dev/null
@@ -1,51 +0,0 @@
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zbp < %s 2>&1 | FileCheck %s
-
-# Too few operands
-gorc t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-grev t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-gorci t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-gorci t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-gorci t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-# Too few operands
-grevi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-grevi t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-grevi t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-# Too few operands
-shfl t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-unshfl t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-shfli t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-shfli t0, t1, 16 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 15]
-shfli t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 15]
-# Too few operands
-unshfli t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-unshfli t0, t1, 16 # CHECK: :[[@LINE]]:17: error: immediate must be an integer in the range [0, 15]
-unshfli t0, t1, -1 # CHECK: :[[@LINE]]:17: error: immediate must be an integer in the range [0, 15]
-# Too few operands
-pack t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-packu t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-packh t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-xperm.n t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-xperm.b t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-xperm.h t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-gorcw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-grevw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-gorciw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-greviw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-shflw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-unshflw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-packw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-packuw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
-xperm.w t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}

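The [0, 15] ranges on shfli/unshfli checked above also follow from the network structure: RV32 shuffles have four interleave stages, one per immediate bit, so the control field is only 4 bits wide. A rough C transcription of the draft-spec reference code (shfl applies stages coarse to fine; unshfl runs the same stages in the opposite order):

    #include <stdint.h>

    /* One stage of the draft-spec shuffle network. */
    static uint32_t shuffle32_stage(uint32_t src, uint32_t maskL,
                                    uint32_t maskR, unsigned n) {
      uint32_t x = src & ~(maskL | maskR);
      x |= ((src << n) & maskL) | ((src >> n) & maskR);
      return x;
    }

    /* shfl, RV32: four stages, so a 4-bit control field, hence the
       [0, 15] immediate range checked by the deleted test above. */
    static uint32_t shfl32(uint32_t x, uint32_t shamt) {
      if (shamt & 8) x = shuffle32_stage(x, 0x00ff0000, 0x0000ff00, 8);
      if (shamt & 4) x = shuffle32_stage(x, 0x0f000f00, 0x00f000f0, 4);
      if (shamt & 2) x = shuffle32_stage(x, 0x30303030, 0x0c0c0c0c, 2);
      if (shamt & 1) x = shuffle32_stage(x, 0x44444444, 0x22222222, 1);
      return x;
    }
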
diff  --git a/llvm/test/MC/RISCV/rv32zbp-only-valid.s b/llvm/test/MC/RISCV/rv32zbp-only-valid.s
deleted file mode 100644
index 42f462e98a21..000000000000
--- a/llvm/test/MC/RISCV/rv32zbp-only-valid.s
+++ /dev/null
@@ -1,21 +0,0 @@
-# With Bitmanip permutation extension:
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefix=CHECK-ASM %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-OBJ %s
-
-# CHECK-ASM: pack t0, t1, zero
-# CHECK-OBJ: zext.h t0, t1
-# CHECK-ASM: encoding: [0xb3,0x42,0x03,0x08]
-pack t0, t1, x0
-# CHECK-ASM: grevi t0, t1, 24
-# CHECK-OBJ: rev8 t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0x83,0x69]
-grevi t0, t1, 24
-# CHECK-ASM-AND-OBJ: zip t0, t1
-# CHECK-ASM: encoding: [0x93,0x12,0xf3,0x08]
-zip x5, x6
-# CHECK-ASM-AND-OBJ: unzip t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0xf3,0x08]
-unzip x5, x6

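The pack-with-x0 case above is also why zext.h could be kept when the rest of Zbp went away: pack just concatenates the low halves of its sources, so a zero second operand leaves a 16-bit zero-extend, and that one point was ratified separately (Zbb/Zbkb). A minimal sketch of the RV32 semantics:

    #include <stdint.h>

    /* pack, RV32: low half of rs2 stacked on top of the low half of
       rs1. With rs2 = x0 the upper half is zero, which is why the
       disassembler above prints pack t0, t1, x0 as zext.h t0, t1. */
    static uint32_t pack32(uint32_t rs1, uint32_t rs2) {
      return (rs2 << 16) | (rs1 & 0xFFFF);
    }
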
diff  --git a/llvm/test/MC/RISCV/rv32zbp-valid.s b/llvm/test/MC/RISCV/rv32zbp-valid.s
deleted file mode 100644
index ec28f0e4c8f7..000000000000
--- a/llvm/test/MC/RISCV/rv32zbp-valid.s
+++ /dev/null
@@ -1,56 +0,0 @@
-# With Bitmanip permutation extension:
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefixes=CHECK-OBJ,CHECK-ASM-AND-OBJ %s
-
-# CHECK-ASM-AND-OBJ: gorc t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x28]
-gorc t0, t1, t2
-# CHECK-ASM-AND-OBJ: grev t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x68]
-grev t0, t1, t2
-# CHECK-ASM-AND-OBJ: gorci t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x28]
-gorci t0, t1, 0
-# CHECK-ASM-AND-OBJ: grevi t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x68]
-grevi t0, t1, 0
-# CHECK-ASM-AND-OBJ: shfl t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x08]
-shfl t0, t1, t2
-# CHECK-ASM-AND-OBJ: unshfl t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x08]
-unshfl t0, t1, t2
-# CHECK-ASM-AND-OBJ: shfli t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x12,0x03,0x08]
-shfli t0, t1, 0
-# CHECK-ASM-AND-OBJ: unshfli t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x08]
-unshfli t0, t1, 0
-# CHECK-ASM-AND-OBJ: pack t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x08]
-pack t0, t1, t2
-# CHECK-ASM-AND-OBJ: packu t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x48]
-packu t0, t1, t2
-# CHECK-ASM-AND-OBJ: packh t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x72,0x73,0x08]
-packh t0, t1, t2
-# CHECK-ASM: gorci t0, t1, 7
-# CHECK-OBJ: orc.b t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
-gorci t0, t1, 7
-# CHECK-ASM-AND-OBJ: xperm.n t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x22,0x73,0x28]
-xperm.n t0, t1, t2
-# CHECK-ASM-AND-OBJ: xperm.b t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x28]
-xperm.b t0, t1, t2
-# CHECK-ASM-AND-OBJ: xperm.h t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x62,0x73,0x28]
-xperm.h t0, t1, t2
-# CHECK-ASM-AND-OBJ: zext.h t0, t1
-# CHECK-ASM: encoding: [0xb3,0x42,0x03,0x08]
-zext.h t0, t1

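gorc is grev's OR-combining twin, which explains the gorci-7/orc.b pair in the deleted test above: OR-combining across the three in-byte stages (1|2|4 = 7) leaves a byte all-ones exactly when it was nonzero, and that single point was ratified in Zbb as orc.b. A minimal C sketch (RV32, our transcription of the draft):

    #include <stdint.h>

    /* Generalized OR-combine, RV32: same stage structure as grev32, but
       each stage ORs the swapped copy in rather than replacing the
       value. shamt 7 smears every set bit across its byte: orc.b. */
    static uint32_t gorc32(uint32_t x, uint32_t shamt) {
      if (shamt & 1)  x |= ((x & 0x55555555) << 1)  | ((x & 0xAAAAAAAA) >> 1);
      if (shamt & 2)  x |= ((x & 0x33333333) << 2)  | ((x & 0xCCCCCCCC) >> 2);
      if (shamt & 4)  x |= ((x & 0x0F0F0F0F) << 4)  | ((x & 0xF0F0F0F0) >> 4);
      if (shamt & 8)  x |= ((x & 0x00FF00FF) << 8)  | ((x & 0xFF00FF00) >> 8);
      if (shamt & 16) x |= ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
      return x;
    }
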
diff  --git a/llvm/test/MC/RISCV/rv64zbb-invalid.s b/llvm/test/MC/RISCV/rv64zbb-invalid.s
index 28efc0fd95e8..c4e5bacb7c39 100644
--- a/llvm/test/MC/RISCV/rv64zbb-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zbb-invalid.s
@@ -6,3 +6,14 @@ clzw t0, t1, t2 # CHECK: :[[@LINE]]:14: error: invalid operand for instruction
 ctzw t0, t1, t2 # CHECK: :[[@LINE]]:14: error: invalid operand for instruction
 # Too many operands
 cpopw t0, t1, t2 # CHECK: :[[@LINE]]:15: error: invalid operand for instruction
+# Too few operands
+rolw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+rorw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+roriw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Immediate operand out of range
+roriw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
+roriw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
+rori t0, t1, 64 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 63]
+rori t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 63]

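The two immediate ranges checked here come straight from the operand widths: rori rotates all 64 bits (6-bit shift amount, [0, 63]) while roriw rotates only the low 32 bits and sign-extends (5-bit shift amount, [0, 31]). A minimal C model of roriw:

    #include <stdint.h>

    /* roriw, RV64 Zbb: rotate the low 32 bits of rs1 right by shamt,
       then sign-extend the 32-bit result. Only 5 bits of shift amount
       are meaningful, hence the [0, 31] assembler range above. */
    static int64_t roriw(uint64_t rs1, unsigned shamt) {
      uint32_t lo = (uint32_t)rs1;
      uint32_t r = (lo >> (shamt & 31)) | (lo << ((32 - shamt) & 31));
      return (int64_t)(int32_t)r;
    }
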
diff  --git a/llvm/test/MC/RISCV/rv64zbb-valid.s b/llvm/test/MC/RISCV/rv64zbb-valid.s
index bd186de8f00c..16ec53aec17d 100644
--- a/llvm/test/MC/RISCV/rv64zbb-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbb-valid.s
@@ -53,3 +53,19 @@ li t0, -5764607523034234886
 # CHECK-ASM-AND-OBJ: addi t0, zero, -18
 # CHECK-ASM-AND-OBJ: rori t0, t0, 37
 li t0, -2281701377
+
+# CHECK-ASM-AND-OBJ: rolw t0, t1, t2
+# CHECK-ASM: encoding: [0xbb,0x12,0x73,0x60]
+rolw t0, t1, t2
+# CHECK-ASM-AND-OBJ: rorw t0, t1, t2
+# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x60]
+rorw t0, t1, t2
+# CHECK-ASM-AND-OBJ: roriw t0, t1, 31
+# CHECK-ASM: encoding: [0x9b,0x52,0xf3,0x61]
+roriw t0, t1, 31
+# CHECK-ASM-AND-OBJ: roriw t0, t1, 0
+# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x60]
+roriw t0, t1, 0
+# CHECK-ASM-AND-OBJ: rev8 t0, t1
+# CHECK-ASM: encoding: [0x93,0x52,0x83,0x6b]
+rev8 t0, t1

diff  --git a/llvm/test/MC/RISCV/rv64zbbp-invalid.s b/llvm/test/MC/RISCV/rv64zbbp-invalid.s
deleted file mode 100644
index 04d9b9984169..000000000000
--- a/llvm/test/MC/RISCV/rv64zbbp-invalid.s
+++ /dev/null
@@ -1,17 +0,0 @@
-# RUN: not llvm-mc -triple riscv64 -mattr=+zbb,+experimental-zbp < %s 2>&1 | FileCheck %s
-
-# Too few operands
-rolw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-rorw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-roriw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-roriw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-roriw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-rori t0, t1, 64 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 63]
-rori t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 63]
-# Too few operands
-packw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-packuw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction

diff  --git a/llvm/test/MC/RISCV/rv64zbbp-valid.s b/llvm/test/MC/RISCV/rv64zbbp-valid.s
deleted file mode 100644
index 58ed58dddee1..000000000000
--- a/llvm/test/MC/RISCV/rv64zbbp-valid.s
+++ /dev/null
@@ -1,32 +0,0 @@
-# With Bitmanip base extension:
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+zbb -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zbb < %s \
-# RUN:     | llvm-objdump --mattr=+zbb -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-
-# With Bitmanip permutation extension:
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-
-# CHECK-ASM-AND-OBJ: rolw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x12,0x73,0x60]
-rolw t0, t1, t2
-# CHECK-ASM-AND-OBJ: rorw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x60]
-rorw t0, t1, t2
-# CHECK-ASM-AND-OBJ: roriw t0, t1, 31
-# CHECK-ASM: encoding: [0x9b,0x52,0xf3,0x61]
-roriw t0, t1, 31
-# CHECK-ASM-AND-OBJ: roriw t0, t1, 0
-# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x60]
-roriw t0, t1, 0
-# CHECK-ASM-AND-OBJ: zext.h t0, t1
-# CHECK-ASM: encoding: [0xbb,0x42,0x03,0x08]
-zext.h t0, t1
-# CHECK-ASM-AND-OBJ: rev8 t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0x83,0x6b]
-rev8 t0, t1

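This deleted file could run under either +zbb or +experimental-zbp because every instruction in it existed in both extensions. The zext.h line is the interesting one: on RV64 its encoding coincides with the removed packw with a zero source. A sketch of the draft packw semantics (our transcription):

    #include <stdint.h>

    /* packw, RV64 draft Zbp: concatenate the low 16 bits of rs2 and rs1
       into a 32-bit value, then sign-extend. With rs2 = x0 bit 31 is
       clear, so the result is a plain 16-bit zero-extend, the same
       operation (and encoding) the ratified Zbb keeps as zext.h. */
    static int64_t packw(uint64_t rs1, uint64_t rs2) {
      uint32_t r = ((uint32_t)(rs2 & 0xFFFF) << 16) | (uint32_t)(rs1 & 0xFFFF);
      return (int64_t)(int32_t)r;
    }
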
diff  --git a/llvm/test/MC/RISCV/rv64zbp-aliases-valid.s b/llvm/test/MC/RISCV/rv64zbp-aliases-valid.s
deleted file mode 100644
index c1474afec527..000000000000
--- a/llvm/test/MC/RISCV/rv64zbp-aliases-valid.s
+++ /dev/null
@@ -1,340 +0,0 @@
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zbp -riscv-no-aliases \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ-NOALIAS %s
-# RUN: llvm-mc %s  -triple=riscv64 -mattr=+experimental-zbp \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump -d -r -M no-aliases --mattr=+experimental-zbp - \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ-NOALIAS %s
-# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump -d -r --mattr=+experimental-zbp - \
-# RUN:     | FileCheck -check-prefixes=CHECK-S-OBJ %s
-
-# The following check prefixes are used in this test:
-# CHECK-S-OBJ            Match both the .s and objdumped object output with
-#                        aliases enabled
-# CHECK-S-OBJ-NOALIAS    Match both the .s and objdumped object output with
-#                        aliases disabled
-
-# CHECK-S-OBJ-NOALIAS: zext.h t0, t1
-# CHECK-S-OBJ: zext.h t0, t1
-zext.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 1
-# CHECK-S-OBJ: rev.p t0, t1
-rev.p x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 2
-# CHECK-S-OBJ: rev2.n t0, t1
-rev2.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 3
-# CHECK-S-OBJ: rev.n t0, t1
-rev.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 4
-# CHECK-S-OBJ: rev4.b t0, t1
-rev4.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 6
-# CHECK-S-OBJ: rev2.b t0, t1
-rev2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: brev8 t0, t1
-# CHECK-S-OBJ: rev.b t0, t1
-rev.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 8
-# CHECK-S-OBJ: rev8.h t0, t1
-rev8.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 12
-# CHECK-S-OBJ: rev4.h t0, t1
-rev4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 14
-# CHECK-S-OBJ: rev2.h t0, t1
-rev2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 15
-# CHECK-S-OBJ: rev.h t0, t1
-rev.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 16
-# CHECK-S-OBJ: rev16.w t0, t1
-rev16.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 24
-# CHECK-S-OBJ: rev8.w t0, t1
-rev8.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 28
-# CHECK-S-OBJ: rev4.w t0, t1
-rev4.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 30
-# CHECK-S-OBJ: rev2.w t0, t1
-rev2.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 31
-# CHECK-S-OBJ: rev.w t0, t1
-rev.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 32
-# CHECK-S-OBJ: rev32 t0, t1
-rev32 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 48
-# CHECK-S-OBJ: rev16 t0, t1
-rev16 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: rev8 t0, t1
-# CHECK-S-OBJ: rev8 t0, t1
-rev8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 60
-# CHECK-S-OBJ: rev4 t0, t1
-rev4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 62
-# CHECK-S-OBJ: rev2 t0, t1
-rev2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 63
-# CHECK-S-OBJ: rev t0, t1
-rev x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 1
-# CHECK-S-OBJ: zip.n t0, t1
-zip.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 1
-# CHECK-S-OBJ: unzip.n t0, t1
-unzip.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 2
-# CHECK-S-OBJ: zip2.b t0, t1
-zip2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 2
-# CHECK-S-OBJ: unzip2.b t0, t1
-unzip2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 3
-# CHECK-S-OBJ: zip.b t0, t1
-zip.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 3
-# CHECK-S-OBJ: unzip.b t0, t1
-unzip.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 4
-# CHECK-S-OBJ: zip4.h t0, t1
-zip4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 4
-# CHECK-S-OBJ: unzip4.h t0, t1
-unzip4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 6
-# CHECK-S-OBJ: zip2.h t0, t1
-zip2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 6
-# CHECK-S-OBJ: unzip2.h t0, t1
-unzip2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 7
-# CHECK-S-OBJ: zip.h t0, t1
-zip.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 7
-# CHECK-S-OBJ: unzip.h t0, t1
-unzip.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 8
-# CHECK-S-OBJ: zip8.w t0, t1
-zip8.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 8
-# CHECK-S-OBJ: unzip8.w t0, t1
-unzip8.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 12
-# CHECK-S-OBJ: zip4.w t0, t1
-zip4.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 12
-# CHECK-S-OBJ: unzip4.w t0, t1
-unzip4.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 14
-# CHECK-S-OBJ: zip2.w t0, t1
-zip2.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 14
-# CHECK-S-OBJ: unzip2.w t0, t1
-unzip2.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 15
-# CHECK-S-OBJ: zip.w t0, t1
-zip.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 15
-# CHECK-S-OBJ: unzip.w t0, t1
-unzip.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 16
-# CHECK-S-OBJ: zip16 t0, t1
-zip16 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 16
-# CHECK-S-OBJ: unzip16 t0, t1
-unzip16 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 24
-# CHECK-S-OBJ: zip8 t0, t1
-zip8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 24
-# CHECK-S-OBJ: unzip8 t0, t1
-unzip8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 28
-# CHECK-S-OBJ: zip4 t0, t1
-zip4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 28
-# CHECK-S-OBJ: unzip4 t0, t1
-unzip4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 30
-# CHECK-S-OBJ: zip2 t0, t1
-zip2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 30
-# CHECK-S-OBJ: unzip2 t0, t1
-unzip2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 31
-# CHECK-S-OBJ: zip t0, t1
-zip x5, x6
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 31
-# CHECK-S-OBJ: unzip t0, t1
-unzip x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 1
-# CHECK-S-OBJ: orc.p t0, t1
-orc.p x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 2
-# CHECK-S-OBJ: orc2.n t0, t1
-orc2.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 3
-# CHECK-S-OBJ: orc.n t0, t1
-orc.n x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 4
-# CHECK-S-OBJ: orc4.b t0, t1
-orc4.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 6
-# CHECK-S-OBJ: orc2.b t0, t1
-orc2.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: orc.b t0, t1
-# CHECK-S-OBJ: orc.b t0, t1
-orc.b x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 8
-# CHECK-S-OBJ: orc8.h t0, t1
-orc8.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 12
-# CHECK-S-OBJ: orc4.h t0, t1
-orc4.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 14
-# CHECK-S-OBJ: orc2.h t0, t1
-orc2.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 15
-# CHECK-S-OBJ: orc.h t0, t1
-orc.h x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 16
-# CHECK-S-OBJ: orc16.w t0, t1
-orc16.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 24
-# CHECK-S-OBJ: orc8.w t0, t1
-orc8.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 28
-# CHECK-S-OBJ: orc4.w t0, t1
-orc4.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 30
-# CHECK-S-OBJ: orc2.w t0, t1
-orc2.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 31
-# CHECK-S-OBJ: orc.w t0, t1
-orc.w x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 32
-# CHECK-S-OBJ: orc32 t0, t1
-orc32 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 48
-# CHECK-S-OBJ: orc16 t0, t1
-orc16 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 56
-# CHECK-S-OBJ: orc8 t0, t1
-orc8 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 60
-# CHECK-S-OBJ: orc4 t0, t1
-orc4 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 62
-# CHECK-S-OBJ: orc2 t0, t1
-orc2 x5, x6
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 63
-# CHECK-S-OBJ: orc t0, t1
-orc x5, x6
-
-# CHECK-S-OBJ-NOALIAS: rori t0, t1, 8
-# CHECK-S-OBJ: rori t0, t1, 8
-ror x5, x6, 8
-
-# CHECK-S-OBJ-NOALIAS: roriw t0, t1, 8
-# CHECK-S-OBJ: roriw t0, t1, 8
-rorw x5, x6, 8
-
-# CHECK-S-OBJ-NOALIAS: grevi t0, t1, 13
-# CHECK-S-OBJ: grevi t0, t1, 13
-grev x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: gorci t0, t1, 13
-# CHECK-S-OBJ: gorci t0, t1, 13
-gorc x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: shfli t0, t1, 13
-# CHECK-S-OBJ: shfli t0, t1, 13
-shfl x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: unshfli t0, t1, 13
-# CHECK-S-OBJ: unshfli t0, t1, 13
-unshfl x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: greviw t0, t1, 13
-# CHECK-S-OBJ: greviw t0, t1, 13
-grevw x5, x6, 13
-
-# CHECK-S-OBJ-NOALIAS: gorciw t0, t1, 13
-# CHECK-S-OBJ: gorciw t0, t1, 13
-gorcw x5, x6, 13

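The RV64 alias table above is the RV32 one shifted: grev/gorc gain a sixth stage and the shift amounts widen to six bits, so rev8 becomes grevi 56 (= 8|16|32), rev32 is grevi 32, and a full bit reverse is grevi 63. Extending the earlier grev32 sketch:

    #include <stdint.h>

    /* Generalized reverse, RV64: the five RV32 stages widened to 64
       bits plus a sixth (shamt bit 5) that swaps the 32-bit halves. */
    static uint64_t grev64(uint64_t x, uint32_t shamt) {
      if (shamt & 1)  x = ((x & 0x5555555555555555ULL) << 1) |
                          ((x & 0xAAAAAAAAAAAAAAAAULL) >> 1);
      if (shamt & 2)  x = ((x & 0x3333333333333333ULL) << 2) |
                          ((x & 0xCCCCCCCCCCCCCCCCULL) >> 2);
      if (shamt & 4)  x = ((x & 0x0F0F0F0F0F0F0F0FULL) << 4) |
                          ((x & 0xF0F0F0F0F0F0F0F0ULL) >> 4);
      if (shamt & 8)  x = ((x & 0x00FF00FF00FF00FFULL) << 8) |
                          ((x & 0xFF00FF00FF00FF00ULL) >> 8);
      if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFULL) << 16) |
                          ((x & 0xFFFF0000FFFF0000ULL) >> 16);
      if (shamt & 32) x = (x << 32) | (x >> 32);
      return x;
    }
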
diff  --git a/llvm/test/MC/RISCV/rv64zbp-invalid.s b/llvm/test/MC/RISCV/rv64zbp-invalid.s
deleted file mode 100644
index 32511fc8dadd..000000000000
--- a/llvm/test/MC/RISCV/rv64zbp-invalid.s
+++ /dev/null
@@ -1,34 +0,0 @@
-# RUN: not llvm-mc -triple riscv64 -mattr=+experimental-zbp < %s 2>&1 | FileCheck %s
-
-# Too few operands
-gorcw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-grevw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-gorciw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-gorciw t0, t1, 32 # CHECK: :[[@LINE]]:16: error: immediate must be an integer in the range [0, 31]
-gorciw t0, t1, -1 # CHECK: :[[@LINE]]:16: error: immediate must be an integer in the range [0, 31]
-# Too few operands
-greviw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-greviw t0, t1, 32 # CHECK: :[[@LINE]]:16: error: immediate must be an integer in the range [0, 31]
-greviw t0, t1, -1 # CHECK: :[[@LINE]]:16: error: immediate must be an integer in the range [0, 31]
-# Too few operands
-shflw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-unshflw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Too few operands
-xperm.w t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
-# Immediate operand out of range
-gorci t0, t1, 64 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 63]
-gorci t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 63]
-# Immediate operand out of range
-grevi t0, t1, 64 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 63]
-grevi t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 63]
-# Immediate operand out of range
-shfli t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-shfli t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31]
-# Immediate operand out of range
-unshfli t0, t1, 32 # CHECK: :[[@LINE]]:17: error: immediate must be an integer in the range [0, 31]
-unshfli t0, t1, -1 # CHECK: :[[@LINE]]:17: error: immediate must be an integer in the range [0, 31]

diff  --git a/llvm/test/MC/RISCV/rv64zbp-valid.s b/llvm/test/MC/RISCV/rv64zbp-valid.s
deleted file mode 100644
index 5743cae5f93d..000000000000
--- a/llvm/test/MC/RISCV/rv64zbp-valid.s
+++ /dev/null
@@ -1,92 +0,0 @@
-# With Bitmanip permutation extension:
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zbp -show-encoding \
-# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zbp < %s \
-# RUN:     | llvm-objdump --mattr=+experimental-zbp -d -r - \
-# RUN:     | FileCheck --check-prefixes=CHECK-OBJ,CHECK-ASM-AND-OBJ %s
-
-# CHECK-ASM-AND-OBJ: gorc t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x28]
-gorc t0, t1, t2
-# CHECK-ASM-AND-OBJ: grev t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x68]
-grev t0, t1, t2
-# CHECK-ASM-AND-OBJ: gorci t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x28]
-gorci t0, t1, 0
-# CHECK-ASM-AND-OBJ: grevi t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x68]
-grevi t0, t1, 0
-# CHECK-ASM-AND-OBJ: shfl t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x08]
-shfl t0, t1, t2
-# CHECK-ASM-AND-OBJ: unshfl t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x08]
-unshfl t0, t1, t2
-# CHECK-ASM-AND-OBJ: shfli t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x12,0x03,0x08]
-shfli t0, t1, 0
-# CHECK-ASM-AND-OBJ: unshfli t0, t1, 0
-# CHECK-ASM: encoding: [0x93,0x52,0x03,0x08]
-unshfli t0, t1, 0
-# CHECK-ASM-AND-OBJ: pack t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x08]
-pack t0, t1, t2
-# CHECK-ASM-AND-OBJ: packu t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x48]
-packu t0, t1, t2
-# CHECK-ASM-AND-OBJ: packh t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x72,0x73,0x08]
-packh t0, t1, t2
-# CHECK-ASM: gorci t0, t1, 7
-# CHECK-OBJ: orc.b t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0x73,0x28]
-gorci t0, t1, 7
-# CHECK-ASM-AND-OBJ: xperm.n t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x22,0x73,0x28]
-xperm.n t0, t1, t2
-# CHECK-ASM-AND-OBJ: xperm.b t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x42,0x73,0x28]
-xperm.b t0, t1, t2
-# CHECK-ASM-AND-OBJ: xperm.h t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x62,0x73,0x28]
-xperm.h t0, t1, t2
-
-# CHECK-ASM-AND-OBJ: gorcw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x28]
-gorcw t0, t1, t2
-# CHECK-ASM-AND-OBJ: grevw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x68]
-grevw t0, t1, t2
-# CHECK-ASM-AND-OBJ: gorciw t0, t1, 0
-# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x28]
-gorciw t0, t1, 0
-# CHECK-ASM-AND-OBJ: greviw t0, t1, 0
-# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x68]
-greviw t0, t1, 0
-# CHECK-ASM-AND-OBJ: shflw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x12,0x73,0x08]
-shflw t0, t1, t2
-# CHECK-ASM-AND-OBJ: unshflw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x08]
-unshflw t0, t1, t2
-# CHECK-ASM-AND-OBJ: packw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x42,0x73,0x08]
-packw t0, t1, t2
-# CHECK-ASM-AND-OBJ: packuw t0, t1, t2
-# CHECK-ASM: encoding: [0xbb,0x42,0x73,0x48]
-packuw t0, t1, t2
-# CHECK-ASM: packw t0, t1, zero
-# CHECK-OBJ: zext.h t0, t1
-# CHECK-ASM: encoding: [0xbb,0x42,0x03,0x08]
-packw t0, t1, x0
-# CHECK-ASM: grevi t0, t1, 56
-# CHECK-OBJ: rev8 t0, t1
-# CHECK-ASM: encoding: [0x93,0x52,0x83,0x6b]
-grevi t0, t1, 56
-# CHECK-ASM-AND-OBJ: xperm.w t0, t1, t2
-# CHECK-ASM: encoding: [0xb3,0x02,0x73,0x28]
-xperm.w t0, t1, t2
-# CHECK-ASM-AND-OBJ: zext.h t0, t1
-# CHECK-ASM: encoding: [0xbb,0x42,0x03,0x08]
-zext.h t0, t1
