[llvm] ada5458 - [RISCV] Expand scalable vector bswap. Fix crash for bitreverse.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 31 10:09:15 PDT 2021


Author: Craig Topper
Date: 2021-10-31T10:01:27-07:00
New Revision: ada5458521977b0d34bf7c6530ed04deba8fd833

URL: https://github.com/llvm/llvm-project/commit/ada5458521977b0d34bf7c6530ed04deba8fd833
DIFF: https://github.com/llvm/llvm-project/commit/ada5458521977b0d34bf7c6530ed04deba8fd833.diff

LOG: [RISCV] Expand scalable vector bswap. Fix crash for bitreverse.

Fix LegalizeVectorOps to not try shuffle or unrolling expansions for
scalable vectors.

Differential Revision: https://reviews.llvm.org/D112236
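For context, a rough scalar model of the shift-and-mask sequences the generic expandBSWAP/expandBITREVERSE fallbacks build (plain C++, illustrative names only, not LLVM APIs; the real lowering emits the same pattern as vector-typed SelectionDAG nodes, which is where the 15/51/85 masks in the checks below come from):

    #include <cstdint>

    // bswap on an i16 element: swap the two bytes.
    uint16_t bswap16(uint16_t X) {
      return (uint16_t)((X << 8) | (X >> 8));
    }

    // bitreverse on an i8 element: swap nibbles, then 2-bit pairs, then
    // adjacent bits. The masks 0x0F, 0x33 and 0x55 appear as 15, 51 and 85
    // in the RVV assembly checked below.
    uint8_t bitreverse8(uint8_t X) {
      X = (uint8_t)(((X & 0x0F) << 4) | ((X >> 4) & 0x0F));
      X = (uint8_t)(((X & 0x33) << 2) | ((X >> 2) & 0x33));
      X = (uint8_t)(((X & 0x55) << 1) | ((X >> 1) & 0x55));
      return X;
    }

Since this expansion only uses shifts, masks and ORs, it works for scalable vectors, unlike the shuffle-based or unrolled expansions that previously caused the crash.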

Added: 
    llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 63ce03d555de5..88a28a3be53e9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1095,6 +1095,10 @@ static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
 SDValue VectorLegalizer::ExpandBSWAP(SDNode *Node) {
   EVT VT = Node->getValueType(0);
 
+  // Scalable vectors can't use shuffle expansion.
+  if (VT.isScalableVector())
+    return TLI.expandBSWAP(Node, DAG);
+
   // Generate a byte wise shuffle mask for the BSWAP.
   SmallVector<int, 16> ShuffleMask;
   createBSWAPShuffleMask(VT, ShuffleMask);
@@ -1124,6 +1128,12 @@ void VectorLegalizer::ExpandBITREVERSE(SDNode *Node,
                                        SmallVectorImpl<SDValue> &Results) {
   EVT VT = Node->getValueType(0);
 
+  // We can't unroll or use shuffles for scalable vectors.
+  if (VT.isScalableVector()) {
+    Results.push_back(TLI.expandBITREVERSE(Node, DAG));
+    return;
+  }
+
   // If we have the scalar operation, it's probably cheaper to unroll it.
   if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType())) {
     SDValue Tmp = DAG.UnrollVectorOp(Node);

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 03a1ecf68b079..5e1acc2950783 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -545,6 +545,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::CTLZ, VT, Expand);
       setOperationAction(ISD::CTPOP, VT, Expand);
 
+      setOperationAction(ISD::BSWAP, VT, Expand);
+
       // Custom-lower extensions and truncations from/to mask types.
       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);

diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
new file mode 100644
index 0000000000000..053b9d85de7f0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -0,0 +1,1657 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @bitreverse_nxv1i8(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.bitreverse.nxv1i8(<vscale x 1 x i8> %va)
+  ret <vscale x 1 x i8> %a
+}
+declare <vscale x 1 x i8> @llvm.bitreverse.nxv1i8(<vscale x 1 x i8>)
+
+define <vscale x 2 x i8> @bitreverse_nxv2i8(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8> %va)
+  ret <vscale x 2 x i8> %a
+}
+declare <vscale x 2 x i8> @llvm.bitreverse.nxv2i8(<vscale x 2 x i8>)
+
+define <vscale x 4 x i8> @bitreverse_nxv4i8(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8> %va)
+  ret <vscale x 4 x i8> %a
+}
+declare <vscale x 4 x i8> @llvm.bitreverse.nxv4i8(<vscale x 4 x i8>)
+
+define <vscale x 8 x i8> @bitreverse_nxv8i8(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vand.vi v9, v8, 15
+; CHECK-NEXT:    vsll.vi v9, v9, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    vsrl.vi v9, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    vsrl.vi v9, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v9, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8> %va)
+  ret <vscale x 8 x i8> %a
+}
+declare <vscale x 8 x i8> @llvm.bitreverse.nxv8i8(<vscale x 8 x i8>)
+
+define <vscale x 16 x i8> @bitreverse_nxv16i8(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vand.vi v10, v8, 15
+; CHECK-NEXT:    vsll.vi v10, v10, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    vsrl.vi v10, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    vsrl.vi v10, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> %va)
+  ret <vscale x 16 x i8> %a
+}
+declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
+
+define <vscale x 32 x i8> @bitreverse_nxv32i8(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vand.vi v12, v8, 15
+; CHECK-NEXT:    vsll.vi v12, v12, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    vsrl.vi v12, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    vsrl.vi v12, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.bitreverse.nxv32i8(<vscale x 32 x i8> %va)
+  ret <vscale x 32 x i8> %a
+}
+declare <vscale x 32 x i8> @llvm.bitreverse.nxv32i8(<vscale x 32 x i8>)
+
+define <vscale x 64 x i8> @bitreverse_nxv64i8(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: bitreverse_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vand.vi v16, v8, 15
+; CHECK-NEXT:    vsll.vi v16, v16, 4
+; CHECK-NEXT:    vsrl.vi v8, v8, 4
+; CHECK-NEXT:    vand.vi v8, v8, 15
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    vsrl.vi v16, v8, 2
+; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vi v16, v8, 1
+; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.bitreverse.nxv64i8(<vscale x 64 x i8> %va)
+  ret <vscale x 64 x i8> %a
+}
+declare <vscale x 64 x i8> @llvm.bitreverse.nxv64i8(<vscale x 64 x i8>)
+
+define <vscale x 1 x i16> @bitreverse_nxv1i16(<vscale x 1 x i16> %va) {
+; RV32-LABEL: bitreverse_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.bitreverse.nxv1i16(<vscale x 1 x i16> %va)
+  ret <vscale x 1 x i16> %a
+}
+declare <vscale x 1 x i16> @llvm.bitreverse.nxv1i16(<vscale x 1 x i16>)
+
+define <vscale x 2 x i16> @bitreverse_nxv2i16(<vscale x 2 x i16> %va) {
+; RV32-LABEL: bitreverse_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.bitreverse.nxv2i16(<vscale x 2 x i16> %va)
+  ret <vscale x 2 x i16> %a
+}
+declare <vscale x 2 x i16> @llvm.bitreverse.nxv2i16(<vscale x 2 x i16>)
+
+define <vscale x 4 x i16> @bitreverse_nxv4i16(<vscale x 4 x i16> %va) {
+; RV32-LABEL: bitreverse_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.bitreverse.nxv4i16(<vscale x 4 x i16> %va)
+  ret <vscale x 4 x i16> %a
+}
+declare <vscale x 4 x i16> @llvm.bitreverse.nxv4i16(<vscale x 4 x i16>)
+
+define <vscale x 8 x i16> @bitreverse_nxv8i16(<vscale x 8 x i16> %va) {
+; RV32-LABEL: bitreverse_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16> %va)
+  ret <vscale x 8 x i16> %a
+}
+declare <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16>)
+
+define <vscale x 16 x i16> @bitreverse_nxv16i16(<vscale x 16 x i16> %va) {
+; RV32-LABEL: bitreverse_nxv16i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv16i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; RV64-NEXT:    vsrl.vi v12, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.bitreverse.nxv16i16(<vscale x 16 x i16> %va)
+  ret <vscale x 16 x i16> %a
+}
+declare <vscale x 16 x i16> @llvm.bitreverse.nxv16i16(<vscale x 16 x i16>)
+
+define <vscale x 32 x i16> @bitreverse_nxv32i16(<vscale x 32 x i16> %va) {
+; RV32-LABEL: bitreverse_nxv32i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    vsll.vi v8, v8, 8
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    lui a0, 3
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    lui a0, 5
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv32i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; RV64-NEXT:    vsrl.vi v16, v8, 8
+; RV64-NEXT:    vsll.vi v8, v8, 8
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    lui a0, 3
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    lui a0, 5
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.bitreverse.nxv32i16(<vscale x 32 x i16> %va)
+  ret <vscale x 32 x i16> %a
+}
+declare <vscale x 32 x i16> @llvm.bitreverse.nxv32i16(<vscale x 32 x i16>)
+
+define <vscale x 1 x i32> @bitreverse_nxv1i32(<vscale x 1 x i32> %va) {
+; RV32-LABEL: bitreverse_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vsll.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vsll.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.bitreverse.nxv1i32(<vscale x 1 x i32> %va)
+  ret <vscale x 1 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.bitreverse.nxv1i32(<vscale x 1 x i32>)
+
+define <vscale x 2 x i32> @bitreverse_nxv2i32(<vscale x 2 x i32> %va) {
+; RV32-LABEL: bitreverse_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vsll.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vsll.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.bitreverse.nxv2i32(<vscale x 2 x i32> %va)
+  ret <vscale x 2 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.bitreverse.nxv2i32(<vscale x 2 x i32>)
+
+define <vscale x 4 x i32> @bitreverse_nxv4i32(<vscale x 4 x i32> %va) {
+; RV32-LABEL: bitreverse_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsrl.vi v12, v8, 24
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vsll.vi v12, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsrl.vi v12, v8, 24
+; RV64-NEXT:    vor.vv v10, v10, v12
+; RV64-NEXT:    vsll.vi v12, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> %va)
+  ret <vscale x 4 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32>)
+
+define <vscale x 8 x i32> @bitreverse_nxv8i32(<vscale x 8 x i32> %va) {
+; RV32-LABEL: bitreverse_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vsrl.vi v16, v8, 24
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vsll.vi v16, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; RV64-NEXT:    vsrl.vi v12, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vsrl.vi v16, v8, 24
+; RV64-NEXT:    vor.vv v12, v12, v16
+; RV64-NEXT:    vsll.vi v16, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.bitreverse.nxv8i32(<vscale x 8 x i32> %va)
+  ret <vscale x 8 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.bitreverse.nxv8i32(<vscale x 8 x i32>)
+
+define <vscale x 16 x i32> @bitreverse_nxv16i32(<vscale x 16 x i32> %va) {
+; RV32-LABEL: bitreverse_nxv16i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vsrl.vi v24, v8, 24
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    vsll.vi v24, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v24, v24, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    lui a0, 61681
+; RV32-NEXT:    addi a0, a0, -241
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 2
+; RV32-NEXT:    lui a0, 209715
+; RV32-NEXT:    addi a0, a0, 819
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    addi a0, a0, 1365
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vand.vx v8, v8, a0
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv16i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; RV64-NEXT:    vsrl.vi v16, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vsrl.vi v24, v8, 24
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    vsll.vi v24, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v24, v24, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    lui a0, 61681
+; RV64-NEXT:    addiw a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    lui a0, 209715
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    lui a0, 349525
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.bitreverse.nxv16i32(<vscale x 16 x i32> %va)
+  ret <vscale x 16 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.bitreverse.nxv16i32(<vscale x 16 x i32>)
+
+define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
+; RV32-LABEL: bitreverse_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a2, 61681
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 209715
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 349525
+; RV32-NEXT:    addi a2, a2, 1365
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v9, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v10, v8, a3
+; RV32-NEXT:    vand.vx v10, v10, a1
+; RV32-NEXT:    vor.vv v9, v10, v9
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsrl.vi v11, v8, 24
+; RV32-NEXT:    vand.vx v11, v11, a0
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    vand.vv v10, v12, v10
+; RV32-NEXT:    vor.vv v10, v10, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vor.vv v9, v10, v9
+; RV32-NEXT:    vsll.vx v10, v8, a2
+; RV32-NEXT:    vsll.vx v12, v8, a3
+; RV32-NEXT:    vand.vv v11, v12, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vor.vv v10, v10, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vsll.vi v13, v8, 8
+; RV32-NEXT:    vand.vv v12, v13, v12
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v11
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    vand.vv v9, v9, v11
+; RV32-NEXT:    vand.vv v8, v8, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 2
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v9, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT:    vsrl.vx v9, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v10, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v10, v10, a2
+; RV64-NEXT:    vor.vv v9, v10, v9
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v10, v10, a2
+; RV64-NEXT:    vsrl.vi v11, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v11, v11, a3
+; RV64-NEXT:    vor.vv v10, v11, v10
+; RV64-NEXT:    vor.vv v9, v10, v9
+; RV64-NEXT:    vsll.vi v10, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v10, v10, a3
+; RV64-NEXT:    vsll.vi v11, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v11, v11, a3
+; RV64-NEXT:    vor.vv v10, v11, v10
+; RV64-NEXT:    vsll.vx v11, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v11, v8
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    lui a0, 3855
+; RV64-NEXT:    addiw a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 2
+; RV64-NEXT:    lui a0, 13107
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    lui a0, 21845
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v9, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.bitreverse.nxv1i64(<vscale x 1 x i64> %va)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.bitreverse.nxv1i64(<vscale x 1 x i64>)
+
+define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
+; RV32-LABEL: bitreverse_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a2, 61681
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 209715
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 349525
+; RV32-NEXT:    addi a2, a2, 1365
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m2, ta, mu
+; RV32-NEXT:    vsrl.vx v10, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v12, v8, a3
+; RV32-NEXT:    vand.vx v12, v12, a1
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsrl.vi v14, v8, 24
+; RV32-NEXT:    vand.vx v14, v14, a0
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    vand.vv v12, v16, v12
+; RV32-NEXT:    vor.vv v12, v12, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v14, (a0), zero
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    vsll.vx v12, v8, a2
+; RV32-NEXT:    vsll.vx v16, v8, a3
+; RV32-NEXT:    vand.vv v14, v16, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v12, v12, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v14, (a0), zero
+; RV32-NEXT:    vsll.vi v18, v8, 8
+; RV32-NEXT:    vand.vv v16, v18, v16
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v14
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v14, (a0), zero
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vsrl.vi v10, v8, 4
+; RV32-NEXT:    vand.vv v10, v10, v14
+; RV32-NEXT:    vand.vv v8, v8, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 2
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v10, v8, 1
+; RV32-NEXT:    vand.vv v10, v10, v12
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT:    vsrl.vx v10, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v12, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v12, v12, a2
+; RV64-NEXT:    vor.vv v10, v12, v10
+; RV64-NEXT:    vsrl.vi v12, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v12, v12, a2
+; RV64-NEXT:    vsrl.vi v14, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v14, v14, a3
+; RV64-NEXT:    vor.vv v12, v14, v12
+; RV64-NEXT:    vor.vv v10, v12, v10
+; RV64-NEXT:    vsll.vi v12, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v12, v12, a3
+; RV64-NEXT:    vsll.vi v14, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v14, v14, a3
+; RV64-NEXT:    vor.vv v12, v14, v12
+; RV64-NEXT:    vsll.vx v14, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v14, v8
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vsrl.vi v10, v8, 4
+; RV64-NEXT:    lui a0, 3855
+; RV64-NEXT:    addiw a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 2
+; RV64-NEXT:    lui a0, 13107
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    vsrl.vi v10, v8, 1
+; RV64-NEXT:    lui a0, 21845
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v10, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> %va)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64>)
+
+define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
+; RV32-LABEL: bitreverse_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a2, 61681
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 209715
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 349525
+; RV32-NEXT:    addi a2, a2, 1365
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m4, ta, mu
+; RV32-NEXT:    vsrl.vx v12, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v16, v8, a3
+; RV32-NEXT:    vand.vx v16, v16, a1
+; RV32-NEXT:    vor.vv v12, v16, v12
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    vsrl.vi v20, v8, 24
+; RV32-NEXT:    vand.vx v20, v20, a0
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    vand.vv v16, v24, v16
+; RV32-NEXT:    vor.vv v16, v16, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v20, (a0), zero
+; RV32-NEXT:    vor.vv v12, v16, v12
+; RV32-NEXT:    vsll.vx v16, v8, a2
+; RV32-NEXT:    vsll.vx v24, v8, a3
+; RV32-NEXT:    vand.vv v20, v24, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vor.vv v16, v16, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v20, (a0), zero
+; RV32-NEXT:    vsll.vi v28, v8, 8
+; RV32-NEXT:    vand.vv v24, v28, v24
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v20
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v20, (a0), zero
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vsrl.vi v12, v8, 4
+; RV32-NEXT:    vand.vv v12, v12, v20
+; RV32-NEXT:    vand.vv v8, v8, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 2
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vsrl.vi v12, v8, 1
+; RV32-NEXT:    vand.vv v12, v12, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV64-NEXT:    vsrl.vx v12, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v16, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vor.vv v12, v16, v12
+; RV64-NEXT:    vsrl.vi v16, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vsrl.vi v20, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v20, v20, a3
+; RV64-NEXT:    vor.vv v16, v20, v16
+; RV64-NEXT:    vor.vv v12, v16, v12
+; RV64-NEXT:    vsll.vi v16, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v16, v16, a3
+; RV64-NEXT:    vsll.vi v20, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v20, v20, a3
+; RV64-NEXT:    vor.vv v16, v20, v16
+; RV64-NEXT:    vsll.vx v20, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v20, v8
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vsrl.vi v12, v8, 4
+; RV64-NEXT:    lui a0, 3855
+; RV64-NEXT:    addiw a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 2
+; RV64-NEXT:    lui a0, 13107
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    vsrl.vi v12, v8, 1
+; RV64-NEXT:    lui a0, 21845
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v12, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.bitreverse.nxv4i64(<vscale x 4 x i64> %va)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.bitreverse.nxv4i64(<vscale x 4 x i64>)
+
+define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
+; RV32-LABEL: bitreverse_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a2, 61681
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 209715
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    lui a2, 349525
+; RV32-NEXT:    addi a2, a2, 1365
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsrl.vx v16, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v24, v8, a3
+; RV32-NEXT:    addi a4, sp, 8
+; RV32-NEXT:    vlse64.v v0, (a4), zero
+; RV32-NEXT:    vand.vx v24, v24, a1
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    vand.vv v24, v24, v0
+; RV32-NEXT:    vsrl.vi v0, v8, 24
+; RV32-NEXT:    vand.vx v0, v0, a0
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8re8.v v0, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vsll.vx v24, v8, a3
+; RV32-NEXT:    vand.vv v16, v24, v16
+; RV32-NEXT:    vsll.vx v24, v8, a2
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v0, (a0), zero
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsll.vi v24, v8, 8
+; RV32-NEXT:    vand.vv v24, v24, v0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vsrl.vi v24, v8, 4
+; RV32-NEXT:    vand.vv v24, v24, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 4
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vsrl.vi v24, v8, 2
+; RV32-NEXT:    vand.vv v24, v24, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    vsrl.vi v24, v8, 1
+; RV32-NEXT:    vand.vv v24, v24, v16
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    vadd.vv v8, v8, v8
+; RV32-NEXT:    vor.vv v8, v24, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bitreverse_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsrl.vx v16, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v24, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vsrl.vi v0, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsll.vi v24, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v24, v24, a3
+; RV64-NEXT:    vsll.vi v0, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vsll.vx v0, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v0, v8
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vsrl.vi v16, v8, 4
+; RV64-NEXT:    lui a0, 3855
+; RV64-NEXT:    addiw a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 241
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, -241
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 4
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 2
+; RV64-NEXT:    lui a0, 13107
+; RV64-NEXT:    addiw a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 819
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vsll.vi v8, v8, 2
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    vsrl.vi v16, v8, 1
+; RV64-NEXT:    lui a0, 21845
+; RV64-NEXT:    addiw a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    addi a0, a0, 1365
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vadd.vv v8, v8, v8
+; RV64-NEXT:    vor.vv v8, v16, v8
+; RV64-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.bitreverse.nxv8i64(<vscale x 8 x i64> %va)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.bitreverse.nxv8i64(<vscale x 8 x i64>)
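
The RV64 checks above for the i64 element types follow the generic shift-and-mask expandBITREVERSE lowering: a byte swap built from shifts and masks, then nibble, bit-pair, and single-bit swaps using the 0x0F0F..., 0x3333..., and 0x5555... constants that the lui/addiw/slli/addi runs materialize. A scalar C++ sketch of the same per-element computation (illustrative only, not part of the patch; the vector code uses vadd.vv v8, v8, v8 for the final shift left by one):

    #include <cstdint>

    // Scalar model of the expansion applied per 64-bit vector element above.
    static uint64_t bitreverse64(uint64_t x) {
      // Byte swap built from shifts and masks (the vsrl/vsll/vand/vor mix).
      x = (x >> 56) |
          ((x >> 40) & 0x000000000000FF00ULL) |
          ((x >> 24) & 0x0000000000FF0000ULL) |
          ((x >> 8)  & 0x00000000FF000000ULL) |
          ((x << 8)  & 0x000000FF00000000ULL) |
          ((x << 24) & 0x0000FF0000000000ULL) |
          ((x << 40) & 0x00FF000000000000ULL) |
          (x << 56);
      // Swap nibbles, then 2-bit groups, then single bits.
      x = ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL) | ((x & 0x0F0F0F0F0F0F0F0FULL) << 4);
      x = ((x >> 2) & 0x3333333333333333ULL) | ((x & 0x3333333333333333ULL) << 2);
      x = ((x >> 1) & 0x5555555555555555ULL) | ((x & 0x5555555555555555ULL) << 1);
      return x;
    }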

diff  --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
new file mode 100644
index 0000000000000..6c02583847b25
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -0,0 +1,670 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i16> @bswap_nxv1i16(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: bswap_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vsrl.vi v9, v8, 8
+; CHECK-NEXT:    vsll.vi v8, v8, 8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.bswap.nxv1i16(<vscale x 1 x i16> %va)
+  ret <vscale x 1 x i16> %a
+}
+declare <vscale x 1 x i16> @llvm.bswap.nxv1i16(<vscale x 1 x i16>)
+
+define <vscale x 2 x i16> @bswap_nxv2i16(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: bswap_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vsrl.vi v9, v8, 8
+; CHECK-NEXT:    vsll.vi v8, v8, 8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.bswap.nxv2i16(<vscale x 2 x i16> %va)
+  ret <vscale x 2 x i16> %a
+}
+declare <vscale x 2 x i16> @llvm.bswap.nxv2i16(<vscale x 2 x i16>)
+
+define <vscale x 4 x i16> @bswap_nxv4i16(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: bswap_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsrl.vi v9, v8, 8
+; CHECK-NEXT:    vsll.vi v8, v8, 8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.bswap.nxv4i16(<vscale x 4 x i16> %va)
+  ret <vscale x 4 x i16> %a
+}
+declare <vscale x 4 x i16> @llvm.bswap.nxv4i16(<vscale x 4 x i16>)
+
+define <vscale x 8 x i16> @bswap_nxv8i16(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: bswap_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vsrl.vi v10, v8, 8
+; CHECK-NEXT:    vsll.vi v8, v8, 8
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> %va)
+  ret <vscale x 8 x i16> %a
+}
+declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
+
+define <vscale x 16 x i16> @bswap_nxv16i16(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: bswap_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vsrl.vi v12, v8, 8
+; CHECK-NEXT:    vsll.vi v8, v8, 8
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.bswap.nxv16i16(<vscale x 16 x i16> %va)
+  ret <vscale x 16 x i16> %a
+}
+declare <vscale x 16 x i16> @llvm.bswap.nxv16i16(<vscale x 16 x i16>)
+
+define <vscale x 32 x i16> @bswap_nxv32i16(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: bswap_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vsrl.vi v16, v8, 8
+; CHECK-NEXT:    vsll.vi v8, v8, 8
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.bswap.nxv32i16(<vscale x 32 x i16> %va)
+  ret <vscale x 32 x i16> %a
+}
+declare <vscale x 32 x i16> @llvm.bswap.nxv32i16(<vscale x 32 x i16>)
+
+define <vscale x 1 x i32> @bswap_nxv1i32(<vscale x 1 x i32> %va) {
+; RV32-LABEL: bswap_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vsll.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vsll.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.bswap.nxv1i32(<vscale x 1 x i32> %va)
+  ret <vscale x 1 x i32> %a
+}
+declare <vscale x 1 x i32> @llvm.bswap.nxv1i32(<vscale x 1 x i32>)
+
+define <vscale x 2 x i32> @bswap_nxv2i32(<vscale x 2 x i32> %va) {
+; RV32-LABEL: bswap_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsrl.vi v9, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v9, v9, a0
+; RV32-NEXT:    vsrl.vi v10, v8, 24
+; RV32-NEXT:    vor.vv v9, v9, v10
+; RV32-NEXT:    vsll.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsrl.vi v9, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    vor.vv v9, v9, v10
+; RV64-NEXT:    vsll.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.bswap.nxv2i32(<vscale x 2 x i32> %va)
+  ret <vscale x 2 x i32> %a
+}
+declare <vscale x 2 x i32> @llvm.bswap.nxv2i32(<vscale x 2 x i32>)
+
+define <vscale x 4 x i32> @bswap_nxv4i32(<vscale x 4 x i32> %va) {
+; RV32-LABEL: bswap_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vsrl.vi v10, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v10, v10, a0
+; RV32-NEXT:    vsrl.vi v12, v8, 24
+; RV32-NEXT:    vor.vv v10, v10, v12
+; RV32-NEXT:    vsll.vi v12, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV64-NEXT:    vsrl.vi v10, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v10, v10, a0
+; RV64-NEXT:    vsrl.vi v12, v8, 24
+; RV64-NEXT:    vor.vv v10, v10, v12
+; RV64-NEXT:    vsll.vi v12, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> %va)
+  ret <vscale x 4 x i32> %a
+}
+declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
+
+define <vscale x 8 x i32> @bswap_nxv8i32(<vscale x 8 x i32> %va) {
+; RV32-LABEL: bswap_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v12, v12, a0
+; RV32-NEXT:    vsrl.vi v16, v8, 24
+; RV32-NEXT:    vor.vv v12, v12, v16
+; RV32-NEXT:    vsll.vi v16, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; RV64-NEXT:    vsrl.vi v12, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v12, v12, a0
+; RV64-NEXT:    vsrl.vi v16, v8, 24
+; RV64-NEXT:    vor.vv v12, v12, v16
+; RV64-NEXT:    vsll.vi v16, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.bswap.nxv8i32(<vscale x 8 x i32> %va)
+  ret <vscale x 8 x i32> %a
+}
+declare <vscale x 8 x i32> @llvm.bswap.nxv8i32(<vscale x 8 x i32>)
+
+define <vscale x 16 x i32> @bswap_nxv16i32(<vscale x 16 x i32> %va) {
+; RV32-LABEL: bswap_nxv16i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vand.vx v16, v16, a0
+; RV32-NEXT:    vsrl.vi v24, v8, 24
+; RV32-NEXT:    vor.vv v16, v16, v24
+; RV32-NEXT:    vsll.vi v24, v8, 8
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    vand.vx v24, v24, a0
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv16i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; RV64-NEXT:    vsrl.vi v16, v8, 8
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vand.vx v16, v16, a0
+; RV64-NEXT:    vsrl.vi v24, v8, 24
+; RV64-NEXT:    vor.vv v16, v16, v24
+; RV64-NEXT:    vsll.vi v24, v8, 8
+; RV64-NEXT:    lui a0, 4080
+; RV64-NEXT:    vand.vx v24, v24, a0
+; RV64-NEXT:    vsll.vi v8, v8, 24
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.bswap.nxv16i32(<vscale x 16 x i32> %va)
+  ret <vscale x 16 x i32> %a
+}
+declare <vscale x 16 x i32> @llvm.bswap.nxv16i32(<vscale x 16 x i32>)
+
+define <vscale x 1 x i64> @bswap_nxv1i64(<vscale x 1 x i64> %va) {
+; RV32-LABEL: bswap_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v9, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v10, v8, a3
+; RV32-NEXT:    vand.vx v10, v10, a1
+; RV32-NEXT:    vor.vv v9, v10, v9
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsrl.vi v11, v8, 24
+; RV32-NEXT:    vand.vx v11, v11, a0
+; RV32-NEXT:    vsrl.vi v12, v8, 8
+; RV32-NEXT:    vand.vv v10, v12, v10
+; RV32-NEXT:    vor.vv v10, v10, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vor.vv v9, v10, v9
+; RV32-NEXT:    vsll.vx v10, v8, a2
+; RV32-NEXT:    vsll.vx v12, v8, a3
+; RV32-NEXT:    vand.vv v11, v12, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vor.vv v10, v10, v11
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vsll.vi v13, v8, 8
+; RV32-NEXT:    vand.vv v12, v13, v12
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v11
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    vor.vv v8, v10, v8
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT:    vsrl.vx v9, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v10, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v10, v10, a2
+; RV64-NEXT:    vor.vv v9, v10, v9
+; RV64-NEXT:    vsrl.vi v10, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v10, v10, a2
+; RV64-NEXT:    vsrl.vi v11, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v11, v11, a3
+; RV64-NEXT:    vor.vv v10, v11, v10
+; RV64-NEXT:    vor.vv v9, v10, v9
+; RV64-NEXT:    vsll.vi v10, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v10, v10, a3
+; RV64-NEXT:    vsll.vi v11, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v11, v11, a3
+; RV64-NEXT:    vor.vv v10, v11, v10
+; RV64-NEXT:    vsll.vx v11, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v11, v8
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    vor.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.bswap.nxv1i64(<vscale x 1 x i64> %va)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.bswap.nxv1i64(<vscale x 1 x i64>)
+
+define <vscale x 2 x i64> @bswap_nxv2i64(<vscale x 2 x i64> %va) {
+; RV32-LABEL: bswap_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m2, ta, mu
+; RV32-NEXT:    vsrl.vx v10, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v12, v8, a3
+; RV32-NEXT:    vand.vx v12, v12, a1
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsrl.vi v14, v8, 24
+; RV32-NEXT:    vand.vx v14, v14, a0
+; RV32-NEXT:    vsrl.vi v16, v8, 8
+; RV32-NEXT:    vand.vv v12, v16, v12
+; RV32-NEXT:    vor.vv v12, v12, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v14, (a0), zero
+; RV32-NEXT:    vor.vv v10, v12, v10
+; RV32-NEXT:    vsll.vx v12, v8, a2
+; RV32-NEXT:    vsll.vx v16, v8, a3
+; RV32-NEXT:    vand.vv v14, v16, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v12, v12, v14
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v14, (a0), zero
+; RV32-NEXT:    vsll.vi v18, v8, 8
+; RV32-NEXT:    vand.vv v16, v18, v16
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v14
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    vor.vv v8, v12, v8
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT:    vsrl.vx v10, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v12, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v12, v12, a2
+; RV64-NEXT:    vor.vv v10, v12, v10
+; RV64-NEXT:    vsrl.vi v12, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v12, v12, a2
+; RV64-NEXT:    vsrl.vi v14, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v14, v14, a3
+; RV64-NEXT:    vor.vv v12, v14, v12
+; RV64-NEXT:    vor.vv v10, v12, v10
+; RV64-NEXT:    vsll.vi v12, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v12, v12, a3
+; RV64-NEXT:    vsll.vi v14, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v14, v14, a3
+; RV64-NEXT:    vor.vv v12, v14, v12
+; RV64-NEXT:    vsll.vx v14, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v14, v8
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %va)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)
+
+define <vscale x 4 x i64> @bswap_nxv4i64(<vscale x 4 x i64> %va) {
+; RV32-LABEL: bswap_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m4, ta, mu
+; RV32-NEXT:    vsrl.vx v12, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v16, v8, a3
+; RV32-NEXT:    vand.vx v16, v16, a1
+; RV32-NEXT:    vor.vv v12, v16, v12
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a1), zero
+; RV32-NEXT:    vsrl.vi v20, v8, 24
+; RV32-NEXT:    vand.vx v20, v20, a0
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    vand.vv v16, v24, v16
+; RV32-NEXT:    vor.vv v16, v16, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v20, (a0), zero
+; RV32-NEXT:    vor.vv v12, v16, v12
+; RV32-NEXT:    vsll.vx v16, v8, a2
+; RV32-NEXT:    vsll.vx v24, v8, a3
+; RV32-NEXT:    vand.vv v20, v24, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vor.vv v16, v16, v20
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v20, (a0), zero
+; RV32-NEXT:    vsll.vi v28, v8, 8
+; RV32-NEXT:    vand.vv v24, v28, v24
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v20
+; RV32-NEXT:    vor.vv v8, v8, v24
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV64-NEXT:    vsrl.vx v12, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v16, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vor.vv v12, v16, v12
+; RV64-NEXT:    vsrl.vi v16, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vsrl.vi v20, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v20, v20, a3
+; RV64-NEXT:    vor.vv v16, v20, v16
+; RV64-NEXT:    vor.vv v12, v16, v12
+; RV64-NEXT:    vsll.vi v16, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v16, v16, a3
+; RV64-NEXT:    vsll.vi v20, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v20, v20, a3
+; RV64-NEXT:    vor.vv v16, v20, v16
+; RV64-NEXT:    vsll.vx v20, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v20, v8
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    vor.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64> %va)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64>)
+
+define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
+; RV32-LABEL: bswap_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    lui a0, 1044480
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 4080
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lui a1, 16
+; RV32-NEXT:    addi a1, a1, -256
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    vsetvli a3, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsrl.vx v16, v8, a2
+; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    vsrl.vx v24, v8, a3
+; RV32-NEXT:    addi a4, sp, 8
+; RV32-NEXT:    vlse64.v v0, (a4), zero
+; RV32-NEXT:    vand.vx v24, v24, a1
+; RV32-NEXT:    vor.vv v16, v24, v16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v24, v8, 8
+; RV32-NEXT:    vand.vv v24, v24, v0
+; RV32-NEXT:    vsrl.vi v0, v8, 24
+; RV32-NEXT:    vand.vx v0, v0, a0
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8re8.v v0, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v24, v24, v0
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vsll.vx v0, v8, a3
+; RV32-NEXT:    vand.vv v16, v0, v16
+; RV32-NEXT:    vsll.vx v0, v8, a2
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vor.vv v16, v0, v16
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v0, (a0), zero
+; RV32-NEXT:    vsll.vi v16, v8, 8
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vsll.vi v8, v8, 24
+; RV32-NEXT:    vand.vv v8, v8, v0
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v16, v8
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bswap_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsrl.vx v16, v8, a0
+; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    vsrl.vx v24, v8, a1
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -256
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v8, 24
+; RV64-NEXT:    lui a2, 4080
+; RV64-NEXT:    vand.vx v24, v24, a2
+; RV64-NEXT:    vsrl.vi v0, v8, 8
+; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vor.vv v16, v24, v16
+; RV64-NEXT:    vsll.vi v24, v8, 8
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    vand.vx v24, v24, a3
+; RV64-NEXT:    vsll.vi v0, v8, 24
+; RV64-NEXT:    slli a3, a2, 40
+; RV64-NEXT:    vand.vx v0, v0, a3
+; RV64-NEXT:    vor.vv v24, v0, v24
+; RV64-NEXT:    vsll.vx v0, v8, a0
+; RV64-NEXT:    vsll.vx v8, v8, a1
+; RV64-NEXT:    slli a0, a2, 48
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vor.vv v8, v0, v8
+; RV64-NEXT:    vor.vv v8, v8, v24
+; RV64-NEXT:    vor.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64> %va)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64>)
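
The i16 checks above are a simple shift-and-or, and the i32 checks are the four-term shift-and-mask byte swap that ISD::BSWAP expands to; the 0xFF00 mask comes from lui 16 / addi(w) -256 and the 0xFF0000 mask from lui 4080. A scalar C++ sketch of the 32-bit variant (illustrative only, not part of the patch):

    #include <cstdint>

    // Scalar model of the per-element expansion in the bswap_nxv*i32 checks.
    static uint32_t bswap32(uint32_t x) {
      uint32_t hi = (x << 24) | ((x << 8) & 0x00FF0000u);  // vsll.vi 24, vsll.vi 8 + vand.vx
      uint32_t lo = ((x >> 8) & 0x0000FF00u) | (x >> 24);  // vsrl.vi 8 + vand.vx, vsrl.vi 24
      return hi | lo;                                      // the trailing vor.vv chain
    }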

More information about the llvm-commits mailing list