[llvm] 47b9da7 - [VP][RISCV] Add vp.bitreverse and RISC-V support.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 11 18:58:52 PST 2022


Author: Yeting Kuo
Date: 2022-12-12T10:58:44+08:00
New Revision: 47b9da72e032f8042d0fdbfef75ecfbb3c6960eb

URL: https://github.com/llvm/llvm-project/commit/47b9da72e032f8042d0fdbfef75ecfbb3c6960eb
DIFF: https://github.com/llvm/llvm-project/commit/47b9da72e032f8042d0fdbfef75ecfbb3c6960eb.diff

LOG: [VP][RISCV] Add vp.bitreverse and RISC-V support.

The patch also adds the function expandVPBITREVERSE to expand ISD::VP_BITREVERSE nodes.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D139697

Added: 
    llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll

Modified: 
    llvm/docs/LangRef.rst
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/include/llvm/IR/Intrinsics.td
    llvm/include/llvm/IR/VPIntrinsics.def
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
    llvm/unittests/IR/VPIntrinsicTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 5b7fea983c6a2..556afe9b02a75 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15216,6 +15216,8 @@ Bit Manipulation Intrinsics
 LLVM provides intrinsics for a few important bit manipulation
 operations. These allow efficient code generation for some algorithms.
 
+.. _int_bitreverse:
+
 '``llvm.bitreverse.*``' Intrinsics
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -22066,6 +22068,53 @@ Examples:
       %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a)
       %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison
 
+.. _int_vp_bitreverse:
+
+'``llvm.vp.bitreverse.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32>  @llvm.vp.bitreverse.v16i32 (<16 x i32> <op>, <16 x i1> <mask>, i32 <vector_length>)
+      declare <vscale x 4 x i32>  @llvm.vp.bitreverse.nxv4i32 (<vscale x 4 x i32> <op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+      declare <256 x i64>  @llvm.vp.bitreverse.v256i64 (<256 x i64> <op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated bitreverse of a vector of integers.
+
+
+Arguments:
+""""""""""
+
+The first operand and the result have the same vector of integer type. The
+second operand is the vector mask and has the same number of elements as the
+result vector type. The third operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.bitreverse``' intrinsic performs bitreverse (:ref:`bitreverse <int_bitreverse>`) of the first operand on each
+enabled lane.  The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
 .. _int_vp_bswap:
 
 '``llvm.vp.bswap.*``' Intrinsics
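
A minimal sketch (not from the patch) of how a frontend could emit the new
intrinsic through IRBuilder, assuming the declarations documented above; the
helper name emitVPBitReverse and its Op/Mask/EVL parameters are illustrative:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Emits %r = call <N x iM> @llvm.vp.bitreverse.*(Op, Mask, EVL). The
    // intrinsic is overloaded only on the data vector type; the mask type
    // (same element count, i1 elements) follows from it.
    static Value *emitVPBitReverse(IRBuilder<> &B, Module &M, Value *Op,
                                   Value *Mask, Value *EVL) {
      Function *F = Intrinsic::getDeclaration(&M, Intrinsic::vp_bitreverse,
                                              {Op->getType()});
      return B.CreateCall(F, {Op, Mask, EVL});
    }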

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 0a5d99f21ccca..c1936dcfcb5ab 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4959,6 +4959,11 @@ class TargetLowering : public TargetLoweringBase {
   /// \returns The expansion result or SDValue() if it fails.
   SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
 
+  /// Expand VP_BITREVERSE nodes with i8/i16/i32/i64 scalar types.
+  /// \param N Node to expand
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
+
   /// Turn load of vector type into a load of the individual elements.
   /// \param LD load to expand
   /// \returns BUILD_VECTOR and TokenFactor nodes.

diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 3a27a2a22fd68..ca443bfa48e1a 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1562,6 +1562,10 @@ let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
                              [ LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
+  def int_vp_bitreverse : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_fshl : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMMatchType<0>,

diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 07f04b6b89eb3..aae00482db9d2 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -220,6 +220,10 @@ END_REGISTER_VP(vp_umax, VP_UMAX)
 BEGIN_REGISTER_VP(vp_bswap, 1, 2, VP_BSWAP, -1)
 END_REGISTER_VP(vp_bswap, VP_BSWAP)
 
+// llvm.vp.bitreverse(x,mask,vlen)
+BEGIN_REGISTER_VP(vp_bitreverse, 1, 2, VP_BITREVERSE, -1)
+END_REGISTER_VP(vp_bitreverse, VP_BITREVERSE)
+
 // llvm.vp.fshl(x,y,z,mask,vlen)
 BEGIN_REGISTER_VP(vp_fshl, 3, 4, VP_FSHL, -1)
 END_REGISTER_VP(vp_fshl, VP_FSHL)
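
The two integer arguments to BEGIN_REGISTER_VP are the operand positions of
the mask and of the explicit vector length. A minimal sketch of querying them,
assuming the VPIntrinsic helpers from llvm/IR/IntrinsicInst.h; the function
name is illustrative:

    #include "llvm/IR/IntrinsicInst.h"
    #include <cassert>
    using namespace llvm;

    static void checkVPBitReversePositions() {
      // As registered above: mask at operand 1, vector length at operand 2.
      auto MaskPos = VPIntrinsic::getMaskParamPos(Intrinsic::vp_bitreverse);
      auto VLPos =
          VPIntrinsic::getVectorLengthParamPos(Intrinsic::vp_bitreverse);
      assert(MaskPos && *MaskPos == 1 && VLPos && *VLPos == 2);
    }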

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 0b12b9aeb20e2..b66e710c701ae 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -788,6 +788,12 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::BITREVERSE:
     ExpandBITREVERSE(Node, Results);
     return;
+  case ISD::VP_BITREVERSE:
+    if (SDValue Expanded = TLI.expandVPBITREVERSE(Node, DAG)) {
+      Results.push_back(Expanded);
+      return;
+    }
+    break;
   case ISD::CTPOP:
     if (SDValue Expanded = TLI.expandCTPOP(Node, DAG)) {
       Results.push_back(Expanded);

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 6ce11e58d840c..a4eb634e4ae2b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1012,6 +1012,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
 
   case ISD::ABS:
   case ISD::BITREVERSE:
+  case ISD::VP_BITREVERSE:
   case ISD::BSWAP:
   case ISD::VP_BSWAP:
   case ISD::CTLZ:
@@ -4091,6 +4092,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
 
   case ISD::ABS:
   case ISD::BITREVERSE:
+  case ISD::VP_BITREVERSE:
   case ISD::BSWAP:
   case ISD::VP_BSWAP:
   case ISD::CTLZ:

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 5027d7aca0bd9..5ad4eebbfc2cf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8630,6 +8630,68 @@ SDValue TargetLowering::expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const {
   return Tmp;
 }
 
+SDValue TargetLowering::expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const {
+  assert(N->getOpcode() == ISD::VP_BITREVERSE);
+
+  SDLoc dl(N);
+  EVT VT = N->getValueType(0);
+  SDValue Op = N->getOperand(0);
+  SDValue Mask = N->getOperand(1);
+  SDValue EVL = N->getOperand(2);
+  EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout());
+  unsigned Sz = VT.getScalarSizeInBits();
+
+  SDValue Tmp, Tmp2, Tmp3;
+
+  // If we can, perform BSWAP first, then mask+swap the i4 halves, then the
+  // i2 pairs, and finally the i1 pairs.
+  // TODO: We can easily support i4/i2 legal types if any target ever does.
+  if (Sz >= 8 && isPowerOf2_32(Sz)) {
+    // Create the masks - repeating the pattern every byte.
+    APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F));
+    APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33));
+    APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55));
+
+    // BSWAP if the type is wider than a single byte.
+    Tmp = (Sz > 8 ? DAG.getNode(ISD::VP_BSWAP, dl, VT, Op, Mask, EVL) : Op);
+
+    // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4)
+    Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT),
+                       Mask, EVL);
+    Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
+                       DAG.getConstant(Mask4, dl, VT), Mask, EVL);
+    Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT),
+                       Mask, EVL);
+    Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT),
+                       Mask, EVL);
+    Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
+
+    // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2)
+    Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT),
+                       Mask, EVL);
+    Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
+                       DAG.getConstant(Mask2, dl, VT), Mask, EVL);
+    Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT),
+                       Mask, EVL);
+    Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT),
+                       Mask, EVL);
+    Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
+
+    // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1)
+    Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT),
+                       Mask, EVL);
+    Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
+                       DAG.getConstant(Mask1, dl, VT), Mask, EVL);
+    Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT),
+                       Mask, EVL);
+    Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT),
+                       Mask, EVL);
+    Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
+    return Tmp;
+  }
+  return SDValue();
+}
+
 std::pair<SDValue, SDValue>
 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
                                     SelectionDAG &DAG) const {
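
As a sanity check on the shift-and-mask network above, the same sequence can
be written out for a single 32-bit lane; a standalone sketch with the mask and
EVL operands dropped, since they only gate which lanes participate:

    #include <cassert>
    #include <cstdint>

    // Scalar model of expandVPBITREVERSE for one i32 lane (Sz == 32).
    static uint32_t bitreverse32(uint32_t V) {
      // BSWAP step (the VP_BSWAP node emitted for Sz > 8).
      V = (V >> 24) | ((V >> 8) & 0x0000FF00u) | ((V << 8) & 0x00FF0000u) |
          (V << 24);
      V = ((V >> 4) & 0x0F0F0F0Fu) | ((V & 0x0F0F0F0Fu) << 4); // swap i4
      V = ((V >> 2) & 0x33333333u) | ((V & 0x33333333u) << 2); // swap i2
      V = ((V >> 1) & 0x55555555u) | ((V & 0x55555555u) << 1); // swap i1
      return V;
    }

    int main() {
      assert(bitreverse32(0x00000001u) == 0x80000000u);
      assert(bitreverse32(0x12345678u) == 0x1e6a2c48u);
      return 0;
    }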

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 76315d5489b5b..0cf6ea50e70c6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -613,7 +613,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, VT, Expand);
 
       setOperationAction(ISD::BSWAP, VT, Expand);
-      setOperationAction(ISD::VP_BSWAP, VT, Expand);
+      setOperationAction({ISD::VP_BSWAP, ISD::VP_BITREVERSE}, VT, Expand);
       setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand);
 
       // Custom-lower extensions and truncations from/to mask types.

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 3b0763fd6a813..f015e951bf037 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -641,6 +641,41 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
     {Intrinsic::bitreverse, MVT::nxv2i64, 52},
     {Intrinsic::bitreverse, MVT::nxv4i64, 52},
     {Intrinsic::bitreverse, MVT::nxv8i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::v2i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::v4i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::v8i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::v16i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::nxv1i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::nxv2i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::nxv4i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::nxv8i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::nxv16i8, 17},
+    {Intrinsic::vp_bitreverse, MVT::v2i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::v4i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::v8i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::v16i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::nxv1i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::nxv2i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::nxv4i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::nxv8i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::nxv16i16, 24},
+    {Intrinsic::vp_bitreverse, MVT::v2i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::v4i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::v8i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::v16i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::nxv1i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::nxv2i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::nxv4i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::nxv8i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::nxv16i32, 33},
+    {Intrinsic::vp_bitreverse, MVT::v2i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::v4i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::v8i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::v16i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::nxv1i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::nxv2i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::nxv4i64, 52},
+    {Intrinsic::vp_bitreverse, MVT::nxv8i64, 52},
     {Intrinsic::ctpop, MVT::v2i8, 12},
     {Intrinsic::ctpop, MVT::v4i8, 12},
     {Intrinsic::ctpop, MVT::v8i8, 12},
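
The vp_bitreverse cost values appear to track the length of the expanded
sequences exercised by the tests below: for e8 the masked expansion is
5 + 6 + 6 = 17 instructions (the nibble swap, then the i2 and i1 rounds, each
needing an extra li for its mask constant), and for e16 the VP_BSWAP prefix
plus three rounds with lui/addi-materialized masks gives 3 + 3*7 = 24.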

diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
new file mode 100644
index 0000000000000..ec9792ce8089e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -0,0 +1,3798 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vp_bitreverse_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vp_bitreverse_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv1i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vp_bitreverse_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vp_bitreverse_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vp_bitreverse_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vp_bitreverse_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv4i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vp_bitreverse_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vp_bitreverse_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv8i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vp_bitreverse_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vand.vi v10, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v10, v10, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vp_bitreverse_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv16i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vand.vi v10, v8, 15
+; CHECK-NEXT:    vsll.vi v10, v10, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    vsrl.vi v10, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    vsrl.vi v10, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vp_bitreverse_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vand.vi v12, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v12, v12, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vp_bitreverse_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv32i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vand.vi v12, v8, 15
+; CHECK-NEXT:    vsll.vi v12, v12, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    vsrl.vi v12, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    vsrl.vi v12, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vp_bitreverse_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vand.vi v16, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v16, v16, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vp_bitreverse_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_nxv64i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vand.vi v16, v8, 15
+; CHECK-NEXT:    vsll.vi v16, v16, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    vsrl.vi v16, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vi v16, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+  %v = call <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  ret <vscale x 64 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vp_bitreverse_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vp_bitreverse_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv1i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv1i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vp_bitreverse_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vp_bitreverse_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv2i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv2i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vp_bitreverse_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vp_bitreverse_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv4i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv4i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vp_bitreverse_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vp_bitreverse_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv8i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv8i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vp_bitreverse_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv16i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; RV32-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv16i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; RV64-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vp_bitreverse_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv16i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv16i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; RV64-NEXT:    vsrl.vi v12, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vp_bitreverse_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv32i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv32i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vp_bitreverse_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv32i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv32i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v16, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i16> %v
+}
+
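+; The i32 tests use the same expansion at SEW=32: a byte swap built from
+; shifts by 8/24 plus the 0xFF00 mask (lui 16; addi -256), followed by the
+; nibble, bit-pair, and single-bit swaps with masks 0x0F0F0F0F (lui 61681;
+; addi -241), 0x33333333 (lui 209715; addi 819), and 0x55555555 (lui 349525;
+; addi 1365). Each swap stage computes x = ((x >> k) & M) | ((x & M) << k)
+; for k = 4, 2, 1; RV64 materializes the same constants with addiw.
+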
+declare <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vp_bitreverse_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vp_bitreverse_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv1i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vand.vx v10, v8, a0
+; RV32-NEXT:    vsll.vi v10, v10, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv1i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vand.vx v10, v8, a0
+; RV64-NEXT:    vsll.vi v10, v10, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vp_bitreverse_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vp_bitreverse_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv2i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vand.vx v10, v8, a0
+; RV32-NEXT:    vsll.vi v10, v10, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv2i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vand.vx v10, v8, a0
+; RV64-NEXT:    vsll.vi v10, v10, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vp_bitreverse_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vand.vx v12, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV64-NEXT:    vand.vx v12, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vp_bitreverse_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv4i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsrl.vi v12, v8, 24
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vand.vx v12, v8, a0
+; RV32-NEXT:    vsll.vi v12, v12, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv4i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsrl.vi v12, v8, 24
+; RV64-NEXT:    vor.vv v10, v10, v12
+; RV64-NEXT:    vand.vx v12, v8, a0
+; RV64-NEXT:    vsll.vi v12, v12, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vp_bitreverse_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV32-NEXT:    vand.vx v16, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV64-NEXT:    vand.vx v16, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vp_bitreverse_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv8i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vsrl.vi v16, v8, 24
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vand.vx v16, v8, a0
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv8i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT:    vsrl.vi v12, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vsrl.vi v16, v8, 24
+; RV64-NEXT:    vor.vv v12, v12, v16
+; RV64-NEXT:    vand.vx v16, v8, a0
+; RV64-NEXT:    vsll.vi v16, v16, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
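+; At LMUL=8 the expansion needs whole register groups as temporaries, so the
+; nxv16i32 tests use the v16 and v24 groups alongside the v8 source group.
+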
+declare <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vp_bitreverse_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv16i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vx v24, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv16i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    vand.vx v24, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vp_bitreverse_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv16i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vsrl.vi v24, v8, 24
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    vand.vx v24, v8, a0
+; RV32-NEXT:    vsll.vi v24, v24, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv16i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v16, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vsrl.vi v24, v8, 24
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    vand.vx v24, v8, a0
+; RV64-NEXT:    vsll.vi v24, v24, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i32> %v
+}
+
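+; The i64 tests diverge by XLEN. RV32 has no 64-bit scalar registers, so each
+; 64-bit mask is staged through the stack as two 32-bit halves and splatted
+; with a zero-strided vlse64.v; those splats run at VLMAX (vsetvli with x0
+; AVL), and a vsetvli on a0 restores the requested EVL before each predicated
+; stage. RV64 builds the wide masks inline (li/slli) or loads them from
+; constant-pool entries such as .LCPI36_0. The byte swap widens to shifts by
+; 8/24/40/56 using the 0xFF00 (lui 16), 0xFF0000 (lui 4080), and 0xFF000000
+; masks.
+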
+declare <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a2, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vsll.vx v9, v8, a2, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a3, a1, -256
+; RV32-NEXT:    vand.vx v10, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsll.vx v10, v10, a4, v0.t
+; RV32-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV32-NEXT:    lui a5, 4080
+; RV32-NEXT:    vand.vx v10, v8, a5, v0.t
+; RV32-NEXT:    vsll.vi v10, v10, 24, v0.t
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v11, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v12, v8, v11, v0.t
+; RV32-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV32-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vsrl.vx v10, v8, a2, v0.t
+; RV32-NEXT:    vsrl.vx v12, v8, a4, v0.t
+; RV32-NEXT:    vand.vx v12, v12, a3, v0.t
+; RV32-NEXT:    vor.vv v10, v12, v10, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 24, v0.t
+; RV32-NEXT:    vand.vx v12, v12, a5, v0.t
+; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v11, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vand.vx v9, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v9, v9, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v10, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v11, v8, a3, v0.t
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v11, v11, a4, v0.t
+; RV64-NEXT:    vor.vv v10, v10, v11, v0.t
+; RV64-NEXT:    vor.vv v9, v10, v9, v0.t
+; RV64-NEXT:    vsrl.vx v10, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v11, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v11, v11, a3, v0.t
+; RV64-NEXT:    vor.vv v10, v11, v10, v0.t
+; RV64-NEXT:    vsrl.vi v11, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v11, v11, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v11, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI36_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI36_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI36_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI36_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI36_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI36_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv1i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vsll.vx v9, v8, a1
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -256
+; RV32-NEXT:    vand.vx v10, v8, a2
+; RV32-NEXT:    li a3, 40
+; RV32-NEXT:    vsll.vx v10, v10, a3
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v10, v8, a4
+; RV32-NEXT:    vsll.vi v10, v10, 24
+; RV32-NEXT:    addi a5, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v11, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v12, v8, v11
+; RV32-NEXT:    vsll.vi v12, v12, 8
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vsrl.vx v10, v8, a1
+; RV32-NEXT:    vsrl.vx v12, v8, a3
+; RV32-NEXT:    vand.vx v12, v12, a2
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    vsrl.vi v12, v8, 24
+; RV32-NEXT:    vand.vx v12, v12, a4
+; RV32-NEXT:    vsrl.vi v8, v8, 8
+; RV32-NEXT:    vand.vv v8, v8, v11
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv1i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vand.vx v9, v8, a1
+; RV64-NEXT:    vsll.vi v9, v9, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v10, v8, a0
+; RV64-NEXT:    vsll.vi v10, v10, 8
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v10, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v11, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v11, v11, a4
+; RV64-NEXT:    vor.vv v10, v10, v11
+; RV64-NEXT:    vor.vv v9, v10, v9
+; RV64-NEXT:    vsrl.vx v10, v8, a2
+; RV64-NEXT:    vsrl.vx v11, v8, a4
+; RV64-NEXT:    vand.vx v11, v11, a3
+; RV64-NEXT:    vor.vv v10, v11, v10
+; RV64-NEXT:    vsrl.vi v11, v8, 24
+; RV64-NEXT:    vand.vx v11, v11, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v11
+; RV64-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI37_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI37_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI37_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI37_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI37_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a2, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vsll.vx v10, v8, a2, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a3, a1, -256
+; RV32-NEXT:    vand.vx v12, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsll.vx v12, v12, a4, v0.t
+; RV32-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV32-NEXT:    lui a5, 4080
+; RV32-NEXT:    vand.vx v12, v8, a5, v0.t
+; RV32-NEXT:    vsll.vi v12, v12, 24, v0.t
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v14, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v16, v8, v14, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV32-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vsrl.vx v12, v8, a2, v0.t
+; RV32-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vor.vv v12, v16, v12, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a5, v0.t
+; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v14, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vand.vx v10, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v12, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV64-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v12, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v14, v8, a3, v0.t
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v14, v14, a4, v0.t
+; RV64-NEXT:    vor.vv v12, v12, v14, v0.t
+; RV64-NEXT:    vor.vv v10, v12, v10, v0.t
+; RV64-NEXT:    vsrl.vx v12, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v14, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v14, v14, a3, v0.t
+; RV64-NEXT:    vor.vv v12, v14, v12, v0.t
+; RV64-NEXT:    vsrl.vi v14, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v14, v14, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v14, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI38_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI38_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI38_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI38_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI38_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI38_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv2i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vsll.vx v10, v8, a1
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -256
+; RV32-NEXT:    vand.vx v12, v8, a2
+; RV32-NEXT:    li a3, 40
+; RV32-NEXT:    vsll.vx v12, v12, a3
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v12, v8, a4
+; RV32-NEXT:    vsll.vi v12, v12, 24
+; RV32-NEXT:    addi a5, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v14, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v16, v8, v14
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vsrl.vx v12, v8, a1
+; RV32-NEXT:    vsrl.vx v16, v8, a3
+; RV32-NEXT:    vand.vx v16, v16, a2
+; RV32-NEXT:    vor.vv v12, v16, v12
+; RV32-NEXT:    vsrl.vi v16, v8, 24
+; RV32-NEXT:    vand.vx v16, v16, a4
+; RV32-NEXT:    vsrl.vi v8, v8, 8
+; RV32-NEXT:    vand.vv v8, v8, v14
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv2i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vand.vx v10, v8, a1
+; RV64-NEXT:    vsll.vi v10, v10, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v12, v8, a0
+; RV64-NEXT:    vsll.vi v12, v12, 8
+; RV64-NEXT:    vor.vv v10, v10, v12
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v12, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v14, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v14, v14, a4
+; RV64-NEXT:    vor.vv v12, v12, v14
+; RV64-NEXT:    vor.vv v10, v12, v10
+; RV64-NEXT:    vsrl.vx v12, v8, a2
+; RV64-NEXT:    vsrl.vx v14, v8, a4
+; RV64-NEXT:    vand.vx v14, v14, a3
+; RV64-NEXT:    vor.vv v12, v14, v12
+; RV64-NEXT:    vsrl.vi v14, v8, 24
+; RV64-NEXT:    vand.vx v14, v14, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v14
+; RV64-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI39_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI39_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI39_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI39_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI39_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a2, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vsll.vx v12, v8, a2, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a3, a1, -256
+; RV32-NEXT:    vand.vx v16, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsll.vx v16, v16, a4, v0.t
+; RV32-NEXT:    vor.vv v16, v12, v16, v0.t
+; RV32-NEXT:    lui a5, 4080
+; RV32-NEXT:    vand.vx v12, v8, a5, v0.t
+; RV32-NEXT:    vsll.vi v20, v12, 24, v0.t
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v24, v8, v12, v0.t
+; RV32-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV32-NEXT:    vor.vv v20, v20, v24, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v20, v0.t
+; RV32-NEXT:    vsrl.vx v20, v8, a2, v0.t
+; RV32-NEXT:    vsrl.vx v24, v8, a4, v0.t
+; RV32-NEXT:    vand.vx v24, v24, a3, v0.t
+; RV32-NEXT:    vor.vv v20, v24, v20, v0.t
+; RV32-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV32-NEXT:    vand.vx v24, v24, a5, v0.t
+; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v20, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT:    vand.vx v12, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v12, v12, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v16, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV64-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v16, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v20, v8, a3, v0.t
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v20, v20, a4, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v20, v0.t
+; RV64-NEXT:    vor.vv v12, v16, v12, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v20, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v20, v20, a3, v0.t
+; RV64-NEXT:    vor.vv v16, v20, v16, v0.t
+; RV64-NEXT:    vsrl.vi v20, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v20, v20, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v20, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI40_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI40_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI40_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI40_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI40_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI40_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv4i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vsll.vx v12, v8, a1
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -256
+; RV32-NEXT:    vand.vx v16, v8, a2
+; RV32-NEXT:    li a3, 40
+; RV32-NEXT:    vsll.vx v16, v16, a3
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v16, v8, a4
+; RV32-NEXT:    vsll.vi v16, v16, 24
+; RV32-NEXT:    addi a5, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v20, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v24, v8, v20
+; RV32-NEXT:    vsll.vi v24, v24, 8
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vsrl.vx v16, v8, a1
+; RV32-NEXT:    vsrl.vx v24, v8, a3
+; RV32-NEXT:    vand.vx v24, v24, a2
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    vsrl.vi v24, v8, 24
+; RV32-NEXT:    vand.vx v24, v24, a4
+; RV32-NEXT:    vsrl.vi v8, v8, 8
+; RV32-NEXT:    vand.vv v8, v8, v20
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv4i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT:    vand.vx v12, v8, a1
+; RV64-NEXT:    vsll.vi v12, v12, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v16, v8, a0
+; RV64-NEXT:    vsll.vi v16, v16, 8
+; RV64-NEXT:    vor.vv v12, v12, v16
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v16, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v20, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v20, v20, a4
+; RV64-NEXT:    vor.vv v16, v16, v20
+; RV64-NEXT:    vor.vv v12, v16, v12
+; RV64-NEXT:    vsrl.vx v16, v8, a2
+; RV64-NEXT:    vsrl.vx v20, v8, a4
+; RV64-NEXT:    vand.vx v20, v20, a3
+; RV64-NEXT:    vor.vv v16, v20, v16
+; RV64-NEXT:    vsrl.vi v20, v8, 24
+; RV64-NEXT:    vand.vx v20, v20, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v20
+; RV64-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI41_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI41_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI41_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI41_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI41_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i64> %v
+}
+
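+; Test a non-power-of-two element count; nxv7i64 is widened to nxv8i64.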
+declare <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv7i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    li a2, 24
+; RV32-NEXT:    mul a1, a1, a2
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a2, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsll.vx v16, v8, a2, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a3, a1, -256
+; RV32-NEXT:    vand.vx v24, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsll.vx v24, v24, a4, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a5, 4080
+; RV32-NEXT:    vand.vx v16, v8, a5, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    csrr a6, vlenb
+; RV32-NEXT:    slli a6, a6, 3
+; RV32-NEXT:    add a6, sp, a6
+; RV32-NEXT:    addi a6, a6, 16
+; RV32-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v8, v16, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT:    csrr a6, vlenb
+; RV32-NEXT:    slli a6, a6, 4
+; RV32-NEXT:    add a6, sp, a6
+; RV32-NEXT:    addi a6, a6, 16
+; RV32-NEXT:    vl8r.v v24, (a6) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV32-NEXT:    addi a6, sp, 16
+; RV32-NEXT:    vl8r.v v24, (a6) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV32-NEXT:    csrr a6, vlenb
+; RV32-NEXT:    slli a6, a6, 4
+; RV32-NEXT:    add a6, sp, a6
+; RV32-NEXT:    addi a6, a6, 16
+; RV32-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vx v24, v8, a2, v0.t
+; RV32-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV32-NEXT:    vand.vx v24, v24, a5, v0.t
+; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 4
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 24
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv7i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vand.vx v16, v8, a3, v0.t
+; RV64-NEXT:    vsll.vx v16, v16, a4, v0.t
+; RV64-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a5) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV64-NEXT:    vsrl.vx v24, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vor.vv v24, v16, v24, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI42_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI42_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 4, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI42_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI42_1)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 4, v0.t
+; RV64-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI42_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI42_2)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i64> %v
+}
+
+define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv7i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsll.vx v16, v8, a1
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -256
+; RV32-NEXT:    vand.vx v24, v8, a2
+; RV32-NEXT:    li a3, 40
+; RV32-NEXT:    vsll.vx v24, v24, a3
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    addi a4, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v16, v8, a4
+; RV32-NEXT:    vsll.vi v0, v16, 24
+; RV32-NEXT:    addi a5, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v24, v8, v16
+; RV32-NEXT:    vsll.vi v24, v24, 8
+; RV32-NEXT:    vor.vv v24, v0, v24
+; RV32-NEXT:    addi a6, sp, 16
+; RV32-NEXT:    vl8r.v v0, (a6) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v0, v24
+; RV32-NEXT:    addi a6, sp, 16
+; RV32-NEXT:    vs8r.v v24, (a6) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vx v0, v8, a3
+; RV32-NEXT:    vand.vx v0, v0, a2
+; RV32-NEXT:    vsrl.vx v24, v8, a1
+; RV32-NEXT:    vor.vv v24, v0, v24
+; RV32-NEXT:    vsrl.vi v0, v8, 8
+; RV32-NEXT:    vand.vv v16, v0, v16
+; RV32-NEXT:    vsrl.vi v8, v8, 24
+; RV32-NEXT:    vand.vx v8, v8, a4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv7i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1
+; RV64-NEXT:    vsll.vi v16, v16, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0
+; RV64-NEXT:    vsll.vi v24, v24, 8
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v0, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v0, v0, a4
+; RV64-NEXT:    vor.vv v24, v24, v0
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vx v24, v8, a2
+; RV64-NEXT:    vsrl.vx v0, v8, a4
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vsrl.vi v0, v8, 24
+; RV64-NEXT:    vand.vx v0, v0, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v0
+; RV64-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI43_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI43_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI43_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI43_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI43_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    li a2, 24
+; RV32-NEXT:    mul a1, a1, a2
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a2, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsll.vx v16, v8, a2, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a3, a1, -256
+; RV32-NEXT:    vand.vx v24, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsll.vx v24, v24, a4, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a5, 4080
+; RV32-NEXT:    vand.vx v16, v8, a5, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    csrr a6, vlenb
+; RV32-NEXT:    slli a6, a6, 3
+; RV32-NEXT:    add a6, sp, a6
+; RV32-NEXT:    addi a6, a6, 16
+; RV32-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v8, v16, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT:    csrr a6, vlenb
+; RV32-NEXT:    slli a6, a6, 4
+; RV32-NEXT:    add a6, sp, a6
+; RV32-NEXT:    addi a6, a6, 16
+; RV32-NEXT:    vl8r.v v24, (a6) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV32-NEXT:    addi a6, sp, 16
+; RV32-NEXT:    vl8r.v v24, (a6) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV32-NEXT:    csrr a6, vlenb
+; RV32-NEXT:    slli a6, a6, 4
+; RV32-NEXT:    add a6, sp, a6
+; RV32-NEXT:    addi a6, a6, 16
+; RV32-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vx v24, v8, a2, v0.t
+; RV32-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV32-NEXT:    vand.vx v24, v24, a5, v0.t
+; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 4
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 24
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vand.vx v16, v8, a3, v0.t
+; RV64-NEXT:    vsll.vx v16, v16, a4, v0.t
+; RV64-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a5) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV64-NEXT:    vsrl.vx v24, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vor.vv v24, v16, v24, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI44_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 4, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI44_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI44_1)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 4, v0.t
+; RV64-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI44_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI44_2)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv8i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsll.vx v16, v8, a1
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -256
+; RV32-NEXT:    vand.vx v24, v8, a2
+; RV32-NEXT:    li a3, 40
+; RV32-NEXT:    vsll.vx v24, v24, a3
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    addi a4, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v16, v8, a4
+; RV32-NEXT:    vsll.vi v0, v16, 24
+; RV32-NEXT:    addi a5, sp, 8
+; RV32-NEXT:    vsetvli a6, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v24, v8, v16
+; RV32-NEXT:    vsll.vi v24, v24, 8
+; RV32-NEXT:    vor.vv v24, v0, v24
+; RV32-NEXT:    addi a6, sp, 16
+; RV32-NEXT:    vl8r.v v0, (a6) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v0, v24
+; RV32-NEXT:    addi a6, sp, 16
+; RV32-NEXT:    vs8r.v v24, (a6) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vx v0, v8, a3
+; RV32-NEXT:    vand.vx v0, v0, a2
+; RV32-NEXT:    vsrl.vx v24, v8, a1
+; RV32-NEXT:    vor.vv v24, v0, v24
+; RV32-NEXT:    vsrl.vi v0, v8, 8
+; RV32-NEXT:    vand.vv v16, v0, v16
+; RV32-NEXT:    vsrl.vi v8, v8, 24
+; RV32-NEXT:    vand.vx v8, v8, a4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a5), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv8i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1
+; RV64-NEXT:    vsll.vi v16, v16, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0
+; RV64-NEXT:    vsll.vi v24, v24, 8
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v0, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v0, v0, a4
+; RV64-NEXT:    vor.vv v24, v24, v0
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vx v24, v8, a2
+; RV64-NEXT:    vsrl.vx v0, v8, a4
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vsrl.vi v0, v8, 24
+; RV64-NEXT:    vand.vx v0, v0, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v0
+; RV64-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI45_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI45_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI45_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI45_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI45_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+; Test splitting. Use the i16 version for easier checking.
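+; nxv64i16 is twice the widest legal i16 type (nxv32i16 at LMUL=8), so the
+; operation is split in two. The high half's EVL is a branchless saturating
+; subtract: (evl - vlenb*4) masked to zero when the subtraction wraps, where
+; vlenb*4 is the element count of one nxv32i16 half; its mask is produced
+; with a vslidedown.vx. The low half clamps its EVL to min(evl, vlenb*4)
+; with the bltu.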
+declare <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv64i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT:    vmv1r.v v24, v0
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    srli a2, a1, 1
+; RV32-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vx v0, v0, a2
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    sub a2, a0, a1
+; RV32-NEXT:    sltu a3, a0, a2
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    and a2, a3, a2
+; RV32-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v8, v16, 8, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v8, v16, 4, v0.t
+; RV32-NEXT:    lui a2, 1
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 4, v0.t
+; RV32-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v8, v16, 2, v0.t
+; RV32-NEXT:    lui a3, 3
+; RV32-NEXT:    addi a3, a3, 819
+; RV32-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v8, v16, 1, v0.t
+; RV32-NEXT:    lui a4, 5
+; RV32-NEXT:    addi a4, a4, 1365
+; RV32-NEXT:    vand.vx v8, v8, a4, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a4, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    addi a5, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a5) # Unknown-size Folded Spill
+; RV32-NEXT:    bltu a0, a1, .LBB46_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB46_2:
+; RV32-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v24
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a4, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a4, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv64i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 4
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    vmv1r.v v24, v0
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 16
+; RV64-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    srli a2, a1, 1
+; RV64-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vx v0, v0, a2
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    sub a2, a0, a1
+; RV64-NEXT:    sltu a3, a0, a2
+; RV64-NEXT:    addi a3, a3, -1
+; RV64-NEXT:    and a2, a3, a2
+; RV64-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v8, v16, 8, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 4, v0.t
+; RV64-NEXT:    lui a2, 1
+; RV64-NEXT:    addiw a2, a2, -241
+; RV64-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 4, v0.t
+; RV64-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 2, v0.t
+; RV64-NEXT:    lui a3, 3
+; RV64-NEXT:    addiw a3, a3, 819
+; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 1, v0.t
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1365
+; RV64-NEXT:    vand.vx v8, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a4, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vs8r.v v8, (a5) # Unknown-size Folded Spill
+; RV64-NEXT:    bltu a0, a1, .LBB46_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB46_2:
+; RV64-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 16
+; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a4, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a4, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
+  ret <vscale x 64 x i16> %v
+}
+
+define <vscale x 64 x i16> @vp_bitreverse_nxv64i16_unmasked(<vscale x 64 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_nxv64i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    sub a2, a0, a1
+; RV32-NEXT:    sltu a3, a0, a2
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    and a2, a3, a2
+; RV32-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v24, v16, 8
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    vsrl.vi v24, v16, 4
+; RV32-NEXT:    lui a2, 1
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    vand.vx v24, v24, a2
+; RV32-NEXT:    vand.vx v16, v16, a2
+; RV32-NEXT:    vsll.vi v16, v16, 4
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    vsrl.vi v24, v16, 2
+; RV32-NEXT:    lui a3, 3
+; RV32-NEXT:    addi a3, a3, 819
+; RV32-NEXT:    vand.vx v24, v24, a3
+; RV32-NEXT:    vand.vx v16, v16, a3
+; RV32-NEXT:    vsll.vi v16, v16, 2
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    vsrl.vi v24, v16, 1
+; RV32-NEXT:    lui a4, 5
+; RV32-NEXT:    addi a4, a4, 1365
+; RV32-NEXT:    vand.vx v24, v24, a4
+; RV32-NEXT:    vand.vx v16, v16, a4
+; RV32-NEXT:    vadd.vv v16, v16, v16
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    bltu a0, a1, .LBB47_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB47_2:
+; RV32-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vsrl.vi v24, v8, 4
+; RV32-NEXT:    vand.vx v24, v24, a2
+; RV32-NEXT:    vand.vx v8, v8, a2
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vsrl.vi v24, v8, 2
+; RV32-NEXT:    vand.vx v24, v24, a3
+; RV32-NEXT:    vand.vx v8, v8, a3
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vsrl.vi v24, v8, 1
+; RV32-NEXT:    vand.vx v24, v24, a4
+; RV32-NEXT:    vand.vx v8, v8, a4
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_nxv64i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    sub a2, a0, a1
+; RV64-NEXT:    sltu a3, a0, a2
+; RV64-NEXT:    addi a3, a3, -1
+; RV64-NEXT:    and a2, a3, a2
+; RV64-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v24, v16, 8
+; RV64-NEXT:    vsll.vi v16, v16, 8
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    vsrl.vi v24, v16, 4
+; RV64-NEXT:    lui a2, 1
+; RV64-NEXT:    addiw a2, a2, -241
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vsll.vi v16, v16, 4
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v16, 2
+; RV64-NEXT:    lui a3, 3
+; RV64-NEXT:    addiw a3, a3, 819
+; RV64-NEXT:    vand.vx v24, v24, a3
+; RV64-NEXT:    vand.vx v16, v16, a3
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v16, 1
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1365
+; RV64-NEXT:    vand.vx v24, v24, a4
+; RV64-NEXT:    vand.vx v16, v16, a4
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    bltu a0, a1, .LBB47_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB47_2:
+; RV64-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v24, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vsrl.vi v24, v8, 4
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vand.vx v8, v8, a2
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v24, v8
+; RV64-NEXT:    vsrl.vi v24, v8, 2
+; RV64-NEXT:    vand.vx v24, v24, a3
+; RV64-NEXT:    vand.vx v8, v8, a3
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v24, v8
+; RV64-NEXT:    vsrl.vi v24, v8, 1
+; RV64-NEXT:    vand.vx v24, v24, a4
+; RV64-NEXT:    vand.vx v8, v8, a4
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v24, v8
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+  %v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
+  ret <vscale x 64 x i16> %v
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
new file mode 100644
index 0000000000000..4c74ff4310a12
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -0,0 +1,3197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vp_bitreverse_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %v
+}
+
+define <2 x i8> @vp_bitreverse_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vp_bitreverse_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+define <4 x i8> @vp_bitreverse_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v4i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vp_bitreverse_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %v
+}
+
+define <8 x i8> @vp_bitreverse_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v8i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vp_bitreverse_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
+; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
+; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i8> %v
+}
+
+define <16 x i8> @vp_bitreverse_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_bitreverse_v16i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    li a0, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    li a0, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.bitreverse.v2i16(<2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vp_bitreverse_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.bitreverse.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i16> %v
+}
+
+define <2 x i16> @vp_bitreverse_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v2i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v2i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.bitreverse.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vp_bitreverse_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %v
+}
+
+define <4 x i16> @vp_bitreverse_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v4i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v4i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vp_bitreverse_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @vp_bitreverse_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v8i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v8i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vp_bitreverse_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v16i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v16i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i16> %v
+}
+
+define <16 x i16> @vp_bitreverse_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v16i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v16i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vp_bitreverse_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i32> %v
+}
+
+define <2 x i32> @vp_bitreverse_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v2i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vand.vx v10, v8, a0
+; RV32-NEXT:    vsll.vi v10, v10, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v2i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vand.vx v10, v8, a0
+; RV64-NEXT:    vsll.vi v10, v10, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vp_bitreverse_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV32-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+define <4 x i32> @vp_bitreverse_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v4i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vand.vx v10, v8, a0
+; RV32-NEXT:    vsll.vi v10, v10, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v4i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vand.vx v10, v8, a0
+; RV64-NEXT:    vsll.vi v10, v10, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.bitreverse.v8i32(<8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vp_bitreverse_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vand.vx v12, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV64-NEXT:    vand.vx v12, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <8 x i32> @llvm.vp.bitreverse.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vp_bitreverse_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v8i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsrl.vi v12, v8, 24
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vand.vx v12, v8, a0
+; RV32-NEXT:    vsll.vi v12, v12, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v8i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsrl.vi v12, v8, 24
+; RV64-NEXT:    vor.vv v10, v10, v12
+; RV64-NEXT:    vand.vx v12, v8, a0
+; RV64-NEXT:    vsll.vi v12, v12, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.bitreverse.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vp_bitreverse_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v16i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV32-NEXT:    vand.vx v16, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v16i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV64-NEXT:    vand.vx v16, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 24, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i32> %v
+}
+
+define <16 x i32> @vp_bitreverse_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v16i32_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vsrl.vi v16, v8, 24
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vand.vx v16, v8, a0
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v16i32_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT:    vsrl.vi v12, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vsrl.vi v16, v8, 24
+; RV64-NEXT:    vor.vv v12, v12, v16
+; RV64-NEXT:    vand.vx v16, v8, a0
+; RV64-NEXT:    vsll.vi v16, v16, 8
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vp_bitreverse_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vmv1r.v v9, v0
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v11, v8, a2, v0.t
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v11, v11, a3, v0.t
+; RV32-NEXT:    vor.vv v10, v11, v10, v0.t
+; RV32-NEXT:    vsrl.vi v11, v8, 24, v0.t
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v11, v11, a4, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 8, v0.t
+; RV32-NEXT:    li a5, 5
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a5
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.i v13, 0
+; RV32-NEXT:    lui a5, 1044480
+; RV32-NEXT:    vmerge.vxm v13, v13, a5, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vand.vv v12, v12, v13, v0.t
+; RV32-NEXT:    vor.vv v11, v12, v11, v0.t
+; RV32-NEXT:    vor.vv v10, v11, v10, v0.t
+; RV32-NEXT:    vsll.vx v11, v8, a1, v0.t
+; RV32-NEXT:    vand.vx v12, v8, a3, v0.t
+; RV32-NEXT:    vsll.vx v12, v12, a2, v0.t
+; RV32-NEXT:    vor.vv v11, v11, v12, v0.t
+; RV32-NEXT:    vand.vx v12, v8, a4, v0.t
+; RV32-NEXT:    vsll.vi v12, v12, 24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v13, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v11, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v11, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v11, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v11, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v11, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v11, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v11, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v11, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v11, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v11, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vand.vx v9, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v9, v9, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
+; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v10, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v11, v8, a3, v0.t
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v11, v11, a4, v0.t
+; RV64-NEXT:    vor.vv v10, v10, v11, v0.t
+; RV64-NEXT:    vor.vv v9, v10, v9, v0.t
+; RV64-NEXT:    vsrl.vx v10, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v11, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v11, v11, a3, v0.t
+; RV64-NEXT:    vor.vv v10, v11, v10, v0.t
+; RV64-NEXT:    vsrl.vi v11, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v11, v11, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v11, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI24_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI24_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI24_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI24_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI24_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %v
+}
+
+define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v2i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v9, v8, a1
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v10, v8, a2
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v10, v10, a3
+; RV32-NEXT:    vor.vv v9, v10, v9
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    li a4, 5
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a4
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.i v11, 0
+; RV32-NEXT:    lui a4, 1044480
+; RV32-NEXT:    vmerge.vxm v11, v11, a4, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v11
+; RV32-NEXT:    vsrl.vi v12, v8, 24
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v12, v12, a4
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vor.vv v9, v10, v9
+; RV32-NEXT:    vsll.vx v10, v8, a1
+; RV32-NEXT:    vand.vx v12, v8, a3
+; RV32-NEXT:    vsll.vx v12, v12, a2
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vand.vx v12, v8, a4
+; RV32-NEXT:    vsll.vi v12, v12, 24
+; RV32-NEXT:    vand.vv v8, v8, v11
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v10, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v10, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v10, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v2i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vand.vx v9, v8, a1
+; RV64-NEXT:    vsll.vi v9, v9, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v10, v8, a0
+; RV64-NEXT:    vsll.vi v10, v10, 8
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v10, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v11, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v11, v11, a4
+; RV64-NEXT:    vor.vv v10, v10, v11
+; RV64-NEXT:    vor.vv v9, v10, v9
+; RV64-NEXT:    vsrl.vx v10, v8, a2
+; RV64-NEXT:    vsrl.vx v11, v8, a4
+; RV64-NEXT:    vand.vx v11, v11, a3
+; RV64-NEXT:    vor.vv v10, v11, v10
+; RV64-NEXT:    vsrl.vi v11, v8, 24
+; RV64-NEXT:    vand.vx v11, v11, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v11
+; RV64-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI25_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI25_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI25_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI25_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vp_bitreverse_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v14, v8, a2, v0.t
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v14, v14, a3, v0.t
+; RV32-NEXT:    vor.vv v12, v14, v12, v0.t
+; RV32-NEXT:    vsrl.vi v14, v8, 24, v0.t
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v14, v14, a4, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV32-NEXT:    li a5, 85
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a5
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.i v18, 0
+; RV32-NEXT:    lui a5, 1044480
+; RV32-NEXT:    vmerge.vxm v18, v18, a5, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vand.vv v16, v16, v18, v0.t
+; RV32-NEXT:    vor.vv v14, v16, v14, v0.t
+; RV32-NEXT:    vor.vv v12, v14, v12, v0.t
+; RV32-NEXT:    vsll.vx v14, v8, a1, v0.t
+; RV32-NEXT:    vand.vx v16, v8, a3, v0.t
+; RV32-NEXT:    vsll.vx v16, v16, a2, v0.t
+; RV32-NEXT:    vor.vv v14, v14, v16, v0.t
+; RV32-NEXT:    vand.vx v16, v8, a4, v0.t
+; RV32-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v18, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v14, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v14, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v14, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v14, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v14, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v14, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v14, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v14, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v14, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v14, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vand.vx v10, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v10, v10, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v12, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v12, v12, 8, v0.t
+; RV64-NEXT:    vor.vv v10, v10, v12, v0.t
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v12, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v14, v8, a3, v0.t
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v14, v14, a4, v0.t
+; RV64-NEXT:    vor.vv v12, v12, v14, v0.t
+; RV64-NEXT:    vor.vv v10, v12, v10, v0.t
+; RV64-NEXT:    vsrl.vx v12, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v14, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v14, v14, a3, v0.t
+; RV64-NEXT:    vor.vv v12, v14, v12, v0.t
+; RV64-NEXT:    vsrl.vi v14, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v14, v14, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v14, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI26_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI26_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI26_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI26_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
+
+define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v4i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vsrl.vx v10, v8, a1
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v12, v8, a2
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v12, v12, a3
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    li a4, 85
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a4
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.i v14, 0
+; RV32-NEXT:    lui a4, 1044480
+; RV32-NEXT:    vmerge.vxm v14, v14, a4, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v14
+; RV32-NEXT:    vsrl.vi v16, v8, 24
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v16, v16, a4
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    vsll.vx v12, v8, a1
+; RV32-NEXT:    vand.vx v16, v8, a3
+; RV32-NEXT:    vsll.vx v16, v16, a2
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vand.vx v16, v8, a4
+; RV32-NEXT:    vsll.vi v16, v16, 24
+; RV32-NEXT:    vand.vv v8, v8, v14
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v12, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v12, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.x v12, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v4i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vand.vx v10, v8, a1
+; RV64-NEXT:    vsll.vi v10, v10, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v12, v8, a0
+; RV64-NEXT:    vsll.vi v12, v12, 8
+; RV64-NEXT:    vor.vv v10, v10, v12
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v12, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v14, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v14, v14, a4
+; RV64-NEXT:    vor.vv v12, v12, v14
+; RV64-NEXT:    vor.vv v10, v12, v10
+; RV64-NEXT:    vsrl.vx v12, v8, a2
+; RV64-NEXT:    vsrl.vx v14, v8, a4
+; RV64-NEXT:    vand.vx v14, v14, a3
+; RV64-NEXT:    vor.vv v12, v14, v12
+; RV64-NEXT:    vsrl.vi v14, v8, 24
+; RV64-NEXT:    vand.vx v14, v14, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v14
+; RV64-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI27_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI27_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI27_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI27_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vp_bitreverse_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vmv1r.v v12, v0
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v20, v8, a2, v0.t
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v20, v20, a3, v0.t
+; RV32-NEXT:    vor.vv v16, v20, v16, v0.t
+; RV32-NEXT:    vsrl.vi v20, v8, 24, v0.t
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v24, v20, a4, v0.t
+; RV32-NEXT:    vsrl.vi v28, v8, 8, v0.t
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1365
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a5
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.i v20, 0
+; RV32-NEXT:    lui a5, 1044480
+; RV32-NEXT:    vmerge.vxm v20, v20, a5, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vand.vv v28, v28, v20, v0.t
+; RV32-NEXT:    vor.vv v24, v28, v24, v0.t
+; RV32-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV32-NEXT:    vsll.vx v24, v8, a1, v0.t
+; RV32-NEXT:    vand.vx v28, v8, a3, v0.t
+; RV32-NEXT:    vsll.vx v28, v28, a2, v0.t
+; RV32-NEXT:    vor.vv v24, v24, v28, v0.t
+; RV32-NEXT:    vand.vx v28, v8, a4, v0.t
+; RV32-NEXT:    vsll.vi v28, v28, 24, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v20, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v28, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.x v20, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v20, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v20, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.x v20, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v20, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v20, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.x v20, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v20, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v20, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT:    vand.vx v12, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v12, v12, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v16, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 8, v0.t
+; RV64-NEXT:    vor.vv v12, v12, v16, v0.t
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v16, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v20, v8, a3, v0.t
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v20, v20, a4, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v20, v0.t
+; RV64-NEXT:    vor.vv v12, v16, v12, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v20, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v20, v20, a3, v0.t
+; RV64-NEXT:    vor.vv v16, v20, v16, v0.t
+; RV64-NEXT:    vsrl.vi v20, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v20, v20, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v20, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI28_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI28_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI28_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI28_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI28_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI28_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v12, v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v8i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vsrl.vx v12, v8, a1
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v16, v8, a2
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v16, v16, a3
+; RV32-NEXT:    vor.vv v12, v16, v12
+; RV32-NEXT:    vsrl.vi v20, v8, 8
+; RV32-NEXT:    lui a4, 5
+; RV32-NEXT:    addi a4, a4, 1365
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a4
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.i v16, 0
+; RV32-NEXT:    lui a4, 1044480
+; RV32-NEXT:    vmerge.vxm v16, v16, a4, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v20, v20, v16
+; RV32-NEXT:    vsrl.vi v24, v8, 24
+; RV32-NEXT:    lui a4, 4080
+; RV32-NEXT:    vand.vx v24, v24, a4
+; RV32-NEXT:    vor.vv v20, v20, v24
+; RV32-NEXT:    vor.vv v12, v20, v12
+; RV32-NEXT:    vsll.vx v20, v8, a1
+; RV32-NEXT:    vand.vx v24, v8, a3
+; RV32-NEXT:    vsll.vx v24, v24, a2
+; RV32-NEXT:    vor.vv v20, v20, v24
+; RV32-NEXT:    vand.vx v24, v8, a4
+; RV32-NEXT:    vsll.vi v24, v24, 24
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vor.vv v8, v20, v8
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.x v16, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.x v16, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vmv.v.x v16, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v8i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT:    vand.vx v12, v8, a1
+; RV64-NEXT:    vsll.vi v12, v12, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v16, v8, a0
+; RV64-NEXT:    vsll.vi v16, v16, 8
+; RV64-NEXT:    vor.vv v12, v12, v16
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v16, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v20, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v20, v20, a4
+; RV64-NEXT:    vor.vv v16, v16, v20
+; RV64-NEXT:    vor.vv v12, v16, v12
+; RV64-NEXT:    vsrl.vx v16, v8, a2
+; RV64-NEXT:    vsrl.vx v20, v8, a4
+; RV64-NEXT:    vand.vx v20, v20, a3
+; RV64-NEXT:    vor.vv v16, v20, v16
+; RV64-NEXT:    vsrl.vi v20, v8, 24
+; RV64-NEXT:    vand.vx v20, v20, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v20
+; RV64-NEXT:    lui a0, %hi(.LCPI29_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI29_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI29_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI29_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI29_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI29_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+declare <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64>, <15 x i1>, i32)
+
+define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v15i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 5
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT:    vmv1r.v v1, v0
+; RV32-NEXT:    li a3, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsrl.vx v16, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsrl.vx v24, v8, a4, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a5, a1, -256
+; RV32-NEXT:    vand.vx v24, v24, a5, v0.t
+; RV32-NEXT:    vor.vv v24, v24, v16, v0.t
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV32-NEXT:    lui a6, 4080
+; RV32-NEXT:    vand.vx v24, v24, a6, v0.t
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsrl.vi v24, v8, 8, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vmv.s.x v0, a1
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    lui a7, 1044480
+; RV32-NEXT:    vmv.v.i v24, 0
+; RV32-NEXT:    vmerge.vxm v16, v24, a7, v0
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    li t0, 24
+; RV32-NEXT:    mul a7, a7, t0
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vs8r.v v16, (a7) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v1
+; RV32-NEXT:    addi a7, sp, 16
+; RV32-NEXT:    vl8r.v v24, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    li t0, 24
+; RV32-NEXT:    mul a7, a7, t0
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vl8r.v v16, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v16, v24, v16, v0.t
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    slli a7, a7, 3
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vl8r.v v24, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    slli a7, a7, 4
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vl8r.v v24, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v16, v24, v0.t
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    slli a7, a7, 4
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vs8r.v v24, (a7) # Unknown-size Folded Spill
+; RV32-NEXT:    vsll.vx v16, v8, a3, v0.t
+; RV32-NEXT:    vand.vx v24, v8, a5, v0.t
+; RV32-NEXT:    vsll.vx v24, v24, a4, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vx v24, v8, a6, v0.t
+; RV32-NEXT:    vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 3
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    li a4, 24
+; RV32-NEXT:    mul a3, a3, a4
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 3
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 4
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v8, v24, v0.t
+; RV32-NEXT:    vsrl.vi v16, v24, 4, v0.t
+; RV32-NEXT:    lui a3, 61681
+; RV32-NEXT:    addi a3, a3, -241
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a3
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v24, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v24, 2, v0.t
+; RV32-NEXT:    lui a3, 209715
+; RV32-NEXT:    addi a3, a3, 819
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a3
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v24, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v24, 1, v0.t
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v15i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vand.vx v16, v8, a3, v0.t
+; RV64-NEXT:    vsll.vx v16, v16, a4, v0.t
+; RV64-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a5) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV64-NEXT:    vsrl.vx v24, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vor.vv v24, v16, v24, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI30_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 4, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI30_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI30_1)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 4, v0.t
+; RV64-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI30_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI30_2)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+  ret <15 x i64> %v
+}
+
+define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v15i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsrl.vx v16, v8, a1
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v24, v8, a2
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v24, v24, a3
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    addi a4, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    lui a4, 349525
+; RV32-NEXT:    addi a4, a4, 1365
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a4
+; RV32-NEXT:    li a5, 32
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    lui a6, 1044480
+; RV32-NEXT:    vmv.v.i v16, 0
+; RV32-NEXT:    vmerge.vxm v16, v16, a6, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v24, v24, v16
+; RV32-NEXT:    lui a6, 4080
+; RV32-NEXT:    vsrl.vi v0, v8, 24
+; RV32-NEXT:    vand.vx v0, v0, a6
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    addi a7, sp, 16
+; RV32-NEXT:    vl8r.v v0, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    addi a7, sp, 16
+; RV32-NEXT:    vs8r.v v24, (a7) # Unknown-size Folded Spill
+; RV32-NEXT:    vand.vx v0, v8, a3
+; RV32-NEXT:    vsll.vx v0, v0, a2
+; RV32-NEXT:    vsll.vx v24, v8, a1
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    vand.vv v16, v8, v16
+; RV32-NEXT:    vand.vx v8, v8, a6
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a4
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v15i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1
+; RV64-NEXT:    vsll.vi v16, v16, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0
+; RV64-NEXT:    vsll.vi v24, v24, 8
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v0, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v0, v0, a4
+; RV64-NEXT:    vor.vv v24, v24, v0
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vx v24, v8, a2
+; RV64-NEXT:    vsrl.vx v0, v8, a4
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vsrl.vi v0, v8, 24
+; RV64-NEXT:    vand.vx v0, v0, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v0
+; RV64-NEXT:    lui a0, %hi(.LCPI31_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI31_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI31_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI31_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI31_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %head = insertelement <15 x i1> poison, i1 true, i32 0
+  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
+  %v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+  ret <15 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v16i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 5
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT:    vmv1r.v v1, v0
+; RV32-NEXT:    li a3, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsrl.vx v16, v8, a3, v0.t
+; RV32-NEXT:    li a4, 40
+; RV32-NEXT:    vsrl.vx v24, v8, a4, v0.t
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a5, a1, -256
+; RV32-NEXT:    vand.vx v24, v24, a5, v0.t
+; RV32-NEXT:    vor.vv v24, v24, v16, v0.t
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 24, v0.t
+; RV32-NEXT:    lui a6, 4080
+; RV32-NEXT:    vand.vx v24, v24, a6, v0.t
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsrl.vi v24, v8, 8, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vmv.s.x v0, a1
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    lui a7, 1044480
+; RV32-NEXT:    vmv.v.i v24, 0
+; RV32-NEXT:    vmerge.vxm v16, v24, a7, v0
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    li t0, 24
+; RV32-NEXT:    mul a7, a7, t0
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vs8r.v v16, (a7) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v1
+; RV32-NEXT:    addi a7, sp, 16
+; RV32-NEXT:    vl8r.v v24, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    li t0, 24
+; RV32-NEXT:    mul a7, a7, t0
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vl8r.v v16, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v16, v24, v16, v0.t
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    slli a7, a7, 3
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vl8r.v v24, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    slli a7, a7, 4
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vl8r.v v24, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v16, v24, v0.t
+; RV32-NEXT:    csrr a7, vlenb
+; RV32-NEXT:    slli a7, a7, 4
+; RV32-NEXT:    add a7, sp, a7
+; RV32-NEXT:    addi a7, a7, 16
+; RV32-NEXT:    vs8r.v v24, (a7) # Unknown-size Folded Spill
+; RV32-NEXT:    vsll.vx v16, v8, a3, v0.t
+; RV32-NEXT:    vand.vx v24, v8, a5, v0.t
+; RV32-NEXT:    vsll.vx v24, v24, a4, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV32-NEXT:    vand.vx v24, v8, a6, v0.t
+; RV32-NEXT:    vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 3
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    li a4, 24
+; RV32-NEXT:    mul a3, a3, a4
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v24, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 3
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 4
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    addi a3, a3, 16
+; RV32-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v8, v24, v0.t
+; RV32-NEXT:    vsrl.vi v16, v24, 4, v0.t
+; RV32-NEXT:    lui a3, 61681
+; RV32-NEXT:    addi a3, a3, -241
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a3
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v24, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v24, 2, v0.t
+; RV32-NEXT:    lui a3, 209715
+; RV32-NEXT:    addi a3, a3, 819
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a3
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v24, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v24, 1, v0.t
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v16i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v16, v16, 24, v0.t
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v24, v24, 8, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2, v0.t
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vand.vx v16, v8, a3, v0.t
+; RV64-NEXT:    vsll.vx v16, v16, a4, v0.t
+; RV64-NEXT:    vor.vv v16, v24, v16, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a5) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v24, v0.t
+; RV64-NEXT:    addi a5, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV64-NEXT:    vsrl.vx v24, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vor.vv v24, v16, v24, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 24, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI32_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24, v0.t
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 4, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI32_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI32_1)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 4, v0.t
+; RV64-NEXT:    vor.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v8, v16, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI32_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI32_2)(a0)
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i64> %v
+}
+
+define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v16i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsrl.vx v16, v8, a1
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    vsrl.vx v24, v8, a2
+; RV32-NEXT:    lui a3, 16
+; RV32-NEXT:    addi a3, a3, -256
+; RV32-NEXT:    vand.vx v24, v24, a3
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    addi a4, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    lui a4, 349525
+; RV32-NEXT:    addi a4, a4, 1365
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.s.x v0, a4
+; RV32-NEXT:    li a5, 32
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    lui a6, 1044480
+; RV32-NEXT:    vmv.v.i v16, 0
+; RV32-NEXT:    vmerge.vxm v16, v16, a6, v0
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v24, v24, v16
+; RV32-NEXT:    lui a6, 4080
+; RV32-NEXT:    vsrl.vi v0, v8, 24
+; RV32-NEXT:    vand.vx v0, v0, a6
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    addi a7, sp, 16
+; RV32-NEXT:    vl8r.v v0, (a7) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    addi a7, sp, 16
+; RV32-NEXT:    vs8r.v v24, (a7) # Unknown-size Folded Spill
+; RV32-NEXT:    vand.vx v0, v8, a3
+; RV32-NEXT:    vsll.vx v0, v0, a2
+; RV32-NEXT:    vsll.vx v24, v8, a1
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    vand.vv v16, v8, v16
+; RV32-NEXT:    vand.vx v8, v8, a6
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a1
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    vsetvli zero, a5, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a4
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vand.vv v8, v8, v24
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v16i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a1, 4080
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v16, v8, a1
+; RV64-NEXT:    vsll.vi v16, v16, 24
+; RV64-NEXT:    li a0, 255
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    vand.vx v24, v8, a0
+; RV64-NEXT:    vsll.vi v24, v24, 8
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    li a2, 56
+; RV64-NEXT:    vsll.vx v24, v8, a2
+; RV64-NEXT:    lui a3, 16
+; RV64-NEXT:    addiw a3, a3, -256
+; RV64-NEXT:    vand.vx v0, v8, a3
+; RV64-NEXT:    li a4, 40
+; RV64-NEXT:    vsll.vx v0, v0, a4
+; RV64-NEXT:    vor.vv v24, v24, v0
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vx v24, v8, a2
+; RV64-NEXT:    vsrl.vx v0, v8, a4
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vsrl.vi v0, v8, 24
+; RV64-NEXT:    vand.vx v0, v0, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 8
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v8, v0
+; RV64-NEXT:    lui a0, %hi(.LCPI33_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI33_0)(a0)
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI33_1)
+; RV64-NEXT:    ld a0, %lo(.LCPI33_1)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    lui a0, %hi(.LCPI33_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI33_2)(a0)
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+  ret <16 x i64> %v
+}
+
+declare <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16>, <128 x i1>, i32)
+
+define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v128i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; RV32-NEXT:    li a2, 64
+; RV32-NEXT:    vslidedown.vi v24, v0, 8
+; RV32-NEXT:    mv a1, a0
+; RV32-NEXT:    bltu a0, a2, .LBB34_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:  .LBB34_2:
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    lui a1, 1
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a1, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    lui a2, 3
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    lui a3, 5
+; RV32-NEXT:    addi a3, a3, 1365
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    addi a4, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a4, a0, -64
+; RV32-NEXT:    sltu a0, a0, a4
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a0, a0, a4
+; RV32-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT:    vmv1r.v v0, v24
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a1, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV32-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v128i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 4
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; RV64-NEXT:    li a2, 64
+; RV64-NEXT:    vslidedown.vi v24, v0, 8
+; RV64-NEXT:    mv a1, a0
+; RV64-NEXT:    bltu a0, a2, .LBB34_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:  .LBB34_2:
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    lui a1, 1
+; RV64-NEXT:    addiw a1, a1, -241
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV64-NEXT:    lui a2, 3
+; RV64-NEXT:    addiw a2, a2, 819
+; RV64-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    lui a3, 5
+; RV64-NEXT:    addiw a3, a3, 1365
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    addi a4, sp, 16
+; RV64-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV64-NEXT:    addi a4, a0, -64
+; RV64-NEXT:    sltu a0, a0, a4
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a0, a0, a4
+; RV64-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 16
+; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vsrl.vi v16, v8, 8, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 8, v0.t
+; RV64-NEXT:    vor.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a1, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vor.vv v8, v16, v8, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a3, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV64-NEXT:    vor.vv v16, v16, v8, v0.t
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
+  ret <128 x i16> %v
+}
+
+define <128 x i16> @vp_bitreverse_v128i16_unmasked(<128 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_bitreverse_v128i16_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a2, 64
+; RV32-NEXT:    mv a1, a0
+; RV32-NEXT:    bltu a0, a2, .LBB35_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:  .LBB35_2:
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vsrl.vi v24, v8, 4
+; RV32-NEXT:    lui a1, 1
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    vand.vx v24, v24, a1
+; RV32-NEXT:    vand.vx v8, v8, a1
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vsrl.vi v24, v8, 2
+; RV32-NEXT:    lui a2, 3
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    vand.vx v24, v24, a2
+; RV32-NEXT:    vand.vx v8, v8, a2
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vsrl.vi v24, v8, 1
+; RV32-NEXT:    lui a3, 5
+; RV32-NEXT:    addi a3, a3, 1365
+; RV32-NEXT:    vand.vx v24, v24, a3
+; RV32-NEXT:    vand.vx v8, v8, a3
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    addi a4, a0, -64
+; RV32-NEXT:    sltu a0, a0, a4
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a0, a0, a4
+; RV32-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v24, v16, 8
+; RV32-NEXT:    vsll.vi v16, v16, 8
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    vsrl.vi v24, v16, 4
+; RV32-NEXT:    vand.vx v24, v24, a1
+; RV32-NEXT:    vand.vx v16, v16, a1
+; RV32-NEXT:    vsll.vi v16, v16, 4
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    vsrl.vi v24, v16, 2
+; RV32-NEXT:    vand.vx v24, v24, a2
+; RV32-NEXT:    vand.vx v16, v16, a2
+; RV32-NEXT:    vsll.vi v16, v16, 2
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    vsrl.vi v24, v16, 1
+; RV32-NEXT:    vand.vx v24, v24, a3
+; RV32-NEXT:    vand.vx v16, v16, a3
+; RV32-NEXT:    vadd.vv v16, v16, v16
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_bitreverse_v128i16_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a2, 64
+; RV64-NEXT:    mv a1, a0
+; RV64-NEXT:    bltu a0, a2, .LBB35_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:  .LBB35_2:
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v24, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vsrl.vi v24, v8, 4
+; RV64-NEXT:    lui a1, 1
+; RV64-NEXT:    addiw a1, a1, -241
+; RV64-NEXT:    vand.vx v24, v24, a1
+; RV64-NEXT:    vand.vx v8, v8, a1
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v24, v8
+; RV64-NEXT:    vsrl.vi v24, v8, 2
+; RV64-NEXT:    lui a2, 3
+; RV64-NEXT:    addiw a2, a2, 819
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vand.vx v8, v8, a2
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v24, v8
+; RV64-NEXT:    vsrl.vi v24, v8, 1
+; RV64-NEXT:    lui a3, 5
+; RV64-NEXT:    addiw a3, a3, 1365
+; RV64-NEXT:    vand.vx v24, v24, a3
+; RV64-NEXT:    vand.vx v8, v8, a3
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v24, v8
+; RV64-NEXT:    addi a4, a0, -64
+; RV64-NEXT:    sltu a0, a0, a4
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a0, a0, a4
+; RV64-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v24, v16, 8
+; RV64-NEXT:    vsll.vi v16, v16, 8
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    vsrl.vi v24, v16, 4
+; RV64-NEXT:    vand.vx v24, v24, a1
+; RV64-NEXT:    vand.vx v16, v16, a1
+; RV64-NEXT:    vsll.vi v16, v16, 4
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v16, 2
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v16, 1
+; RV64-NEXT:    vand.vx v24, v24, a3
+; RV64-NEXT:    vand.vx v16, v16, a3
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    ret
+  %head = insertelement <128 x i1> poison, i1 true, i32 0
+  %m = shufflevector <128 x i1> %head, <128 x i1> poison, <128 x i32> zeroinitializer
+  %v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
+  ret <128 x i16> %v
+}

diff  --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
index b4a5e5608e3da..5fa891b2ac71f 100644
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -146,6 +146,8 @@ class VPIntrinsicTest : public testing::Test {
     Str << " declare <8 x i1> @llvm.vp.icmp.v8i16"
         << "(<8 x i16>, <8 x i16>, metadata, <8 x i1>, i32) ";
 
+    Str << " declare <8 x i16> @llvm.vp.bitreverse.v8i16"
+        << "(<8 x i16>, <8 x i1>, i32) ";
     Str << " declare <8 x i16> @llvm.vp.bswap.v8i16"
         << "(<8 x i16>, <8 x i1>, i32) ";
     Str << " declare <8 x i16> @llvm.vp.fshl.v8i16"
