[llvm] 33a83c5 - [RISCV] Add SDNode patterns for vrol.[vv,vx] and vror.[vv,vx,vi]

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 21 02:22:51 PDT 2023


Author: Luke Lau
Date: 2023-07-21T10:22:46+01:00
New Revision: 33a83c5486d599e00f4c6ba35b12c1e74bc0554b

URL: https://github.com/llvm/llvm-project/commit/33a83c5486d599e00f4c6ba35b12c1e74bc0554b
DIFF: https://github.com/llvm/llvm-project/commit/33a83c5486d599e00f4c6ba35b12c1e74bc0554b.diff

LOG: [RISCV] Add SDNode patterns for vrol.[vv,vx] and vror.[vv,vx,vi]

These correspond to the generic ISD::ROTL and ISD::ROTR SelectionDAG nodes.
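
For illustration (not part of the patch): LLVM IR has no rotate instruction, so a rotate is written as a funnel shift whose two data operands are the same value. A minimal example that the new patterns select to a single vrol.vv under +experimental-zvbb, mirroring the tests added below:

  define <vscale x 1 x i8> @rotl_example(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
    ; fshl(a, a, b) == rotl(a, b); with Zvbb this selects: vrol.vv v8, v8, v9
    %x = call <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b)
    ret <vscale x 1 x i8> %x
  }
  declare <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)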

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155439

Added: 
    llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 8204c5f09f3b8f..cafce628cf6a22 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2999,7 +2999,8 @@ bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
       });
 }
 
-bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
+bool RISCVDAGToDAGISel::selectVSplatUimm(SDValue N, unsigned Bits,
+                                         SDValue &SplatVal) {
   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
       !isa<ConstantSDNode>(N.getOperand(1)))
     return false;
@@ -3007,7 +3008,7 @@ bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
   int64_t SplatImm =
       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
 
-  if (!isUInt<5>(SplatImm))
+  if (!isUIntN(Bits, SplatImm))
     return false;
 
   SplatVal =

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index e99f8d69ebe12c..281719c12e7032 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -128,7 +128,10 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
 
   bool selectVSplat(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
-  bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
+  bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal);
+  template <unsigned Bits> bool selectVSplatUimmBits(SDValue N, SDValue &Val) {
+    return selectVSplatUimm(N, Bits, Val);
+  }
   bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
   bool selectExtOneUseVSplat(SDValue N, SDValue &SplatVal);

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b45387bf9cc437..b7aff1ab550d03 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -678,8 +678,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                          Legal);
 
-      setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
-
       setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand);
 
       // Custom-lower extensions and truncations from/to mask types.
@@ -769,6 +767,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                               ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ_ZERO_UNDEF},
                              VT, Custom);
         }
+
+        setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
       }
     }
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index da095a48a90441..117eb3357a7857 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -570,7 +570,8 @@ foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
 // Give explicit Complexity to prefer simm5/uimm5.
 def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat",      [], [], 1>;
 def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>;
-def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], [], 3>;
+def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>;
+def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>;
 def SplatPat_simm5_plus1
     : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>;
 def SplatPat_simm5_plus1_nonzero
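
An illustrative note on SplatPat_uimm6 (not part of the patch): vror.vi encodes a 6-bit rotate amount, so a constant splat up to 63 can be folded into the immediate form. A hedged sketch of IR the new 6-bit splat matcher is intended to catch, with the expected (unverified here) instruction in the comment:

  define <vscale x 1 x i64> @vror_vi_example(<vscale x 1 x i64> %a) {
    ; The splat of 40 needs 6 bits (it exceeds uimm5), so selectVSplatUimmBits<6>
    ; should match and allow selecting something like: vror.vi v8, v8, 40
    %head = insertelement <vscale x 1 x i64> poison, i64 40, i32 0
    %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
    %x = call <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %splat)
    ret <vscale x 1 x i64> %x
  }
  declare <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>)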

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 98877bb24cc95b..de51f63c89aa36 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -199,6 +199,9 @@ defm PseudoVCLZ : VPseudoUnaryV_V;
 defm PseudoVCTZ : VPseudoUnaryV_V;
 defm PseudoVCPOP : VPseudoUnaryV_V;
 
+defm PseudoVROL : VPseudoVALU_VV_VX;
+defm PseudoVROR : VPseudoVALU_VV_VX_VI<uimm6>;
+
 //===----------------------------------------------------------------------===//
 // SDNode patterns
 //===----------------------------------------------------------------------===//
@@ -250,6 +253,29 @@ defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
 defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
 defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;
 
+defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">;
+
+def NegImm64 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
+// Although there is no vrol.vi, an immediate rotate left can be achieved by
+// negating the immediate in vror.vi
+foreach vti = AllIntegerVectors in {
+  let Predicates = !listconcat([HasStdExtZvbb],
+                               GetVTypePredicates<vti>.Predicates) in {
+    def : Pat<(vti.Vector (rotl vti.RegClass:$rs2,
+                                (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))),
+              (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX)
+                 (vti.Vector (IMPLICIT_DEF)),
+                 vti.RegClass:$rs2,
+                 (NegImm64 uimm6:$rs1),
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
+  }
+}
+defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;
+
 //===----------------------------------------------------------------------===//
 // VL patterns
 //===----------------------------------------------------------------------===//
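
A worked example of the NegImm64 transform above (illustrative, not part of the patch): a rotate left by immediate N is rewritten as a rotate right by (64 - N) & 63, and since vror uses only the low log2(SEW) bits of the rotate amount, the same encoding works for every SEW. For N = 3, 0x3f & (64 - 3) = 61:

  define <vscale x 1 x i64> @vrol_vi_example(<vscale x 1 x i64> %a) {
    ; rotl by 3 == rotr by 61 for 64-bit elements; expected (unverified here)
    ; to select: vror.vi v8, v8, 61
    %head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
    %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
    %x = call <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %splat)
    ret <vscale x 1 x i64> %x
  }
  declare <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>)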

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
new file mode 100644
index 00000000000000..107ddb8024e31b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
@@ -0,0 +1,1233 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+
+declare <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i8> @vrol_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vrol_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i8> %b.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b.splat)
+  ret <vscale x 1 x i8> %x
+}
+
+declare <vscale x 2 x i8> @llvm.fshl.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @vrol_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i8> @llvm.fshl.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vrol_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i8> %b.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i8> @llvm.fshl.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b.splat)
+  ret <vscale x 2 x i8> %x
+}
+
+declare <vscale x 4 x i8> @llvm.fshl.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @vrol_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i8> @llvm.fshl.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vrol_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i8> %b.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i8> @llvm.fshl.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b.splat)
+  ret <vscale x 4 x i8> %x
+}
+
+declare <vscale x 8 x i8> @llvm.fshl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @vrol_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i8> @llvm.fshl.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vrol_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i8> %b.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i8> @llvm.fshl.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b.splat)
+  ret <vscale x 8 x i8> %x
+}
+
+declare <vscale x 16 x i8> @llvm.fshl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @vrol_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vand.vi v12, v10, 7
+; CHECK-NEXT:    vsll.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i8> @llvm.fshl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vrol_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vand.vi v12, v10, 7
+; CHECK-NEXT:    vsll.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x i8> %b.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i8> @llvm.fshl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b.splat)
+  ret <vscale x 16 x i8> %x
+}
+
+declare <vscale x 32 x i8> @llvm.fshl.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @vrol_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vand.vi v16, v12, 7
+; CHECK-NEXT:    vsll.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i8> @llvm.fshl.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vrol_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vand.vi v16, v12, 7
+; CHECK-NEXT:    vsll.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 32 x i8> %b.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i8> @llvm.fshl.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b.splat)
+  ret <vscale x 32 x i8> %x
+}
+
+declare <vscale x 64 x i8> @llvm.fshl.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @vrol_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; CHECK-LABEL: vrol_vv_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vand.vi v24, v16, 7
+; CHECK-NEXT:    vsll.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 64 x i8> @llvm.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vrol_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b) {
+; CHECK-LABEL: vrol_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vand.vi v24, v16, 7
+; CHECK-NEXT:    vsll.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 7
+; CHECK-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 64 x i8> %b.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+  %x = call <vscale x 64 x i8> @llvm.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b.splat)
+  ret <vscale x 64 x i8> %x
+}
+
+declare <vscale x 1 x i16> @llvm.fshl.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i16> @vrol_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; CHECK-LABEL: vrol_vv_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i16> @llvm.fshl.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vrol_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b) {
+; CHECK-LABEL: vrol_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i16> %b.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i16> @llvm.fshl.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b.splat)
+  ret <vscale x 1 x i16> %x
+}
+
+declare <vscale x 2 x i16> @llvm.fshl.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @vrol_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; CHECK-LABEL: vrol_vv_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i16> @llvm.fshl.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vrol_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b) {
+; CHECK-LABEL: vrol_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i16> %b.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i16> @llvm.fshl.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b.splat)
+  ret <vscale x 2 x i16> %x
+}
+
+declare <vscale x 4 x i16> @llvm.fshl.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @vrol_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: vrol_vv_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i16> @llvm.fshl.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vrol_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b) {
+; CHECK-LABEL: vrol_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i16> %b.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i16> @llvm.fshl.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b.splat)
+  ret <vscale x 4 x i16> %x
+}
+
+declare <vscale x 8 x i16> @llvm.fshl.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @vrol_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: vrol_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vand.vi v12, v10, 15
+; CHECK-NEXT:    vsll.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i16> @llvm.fshl.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vrol_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b) {
+; CHECK-LABEL: vrol_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vand.vi v12, v10, 15
+; CHECK-NEXT:    vsll.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i16> %b.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i16> @llvm.fshl.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b.splat)
+  ret <vscale x 8 x i16> %x
+}
+
+declare <vscale x 16 x i16> @llvm.fshl.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @vrol_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; CHECK-LABEL: vrol_vv_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vand.vi v16, v12, 15
+; CHECK-NEXT:    vsll.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i16> @llvm.fshl.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vrol_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b) {
+; CHECK-LABEL: vrol_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vand.vi v16, v12, 15
+; CHECK-NEXT:    vsll.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x i16> %b.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i16> @llvm.fshl.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b.splat)
+  ret <vscale x 16 x i16> %x
+}
+
+declare <vscale x 32 x i16> @llvm.fshl.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @vrol_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; CHECK-LABEL: vrol_vv_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vand.vi v24, v16, 15
+; CHECK-NEXT:    vsll.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i16> @llvm.fshl.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vrol_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b) {
+; CHECK-LABEL: vrol_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vand.vi v24, v16, 15
+; CHECK-NEXT:    vsll.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 15
+; CHECK-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 32 x i16> %b.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i16> @llvm.fshl.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b.splat)
+  ret <vscale x 32 x i16> %x
+}
+
+declare <vscale x 1 x i32> @llvm.fshl.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>)
+
+define <vscale x 1 x i32> @vrol_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) {
+; CHECK-LABEL: vrol_vv_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v10, v9, a0
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i32> @llvm.fshl.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b)
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vrol_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv1i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-RV32-NEXT:    vsll.vx v9, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv1i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v9, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v10, v9, a0
+; CHECK-RV64-NEXT:    vsll.vv v10, v8, v10
+; CHECK-RV64-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-RV64-NEXT:    vand.vx v9, v9, a0
+; CHECK-RV64-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i32> %b.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i32> @llvm.fshl.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b.splat)
+  ret <vscale x 1 x i32> %x
+}
+
+declare <vscale x 2 x i32> @llvm.fshl.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @vrol_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: vrol_vv_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vand.vx v10, v9, a0
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i32> @llvm.fshl.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vrol_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv2i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-RV32-NEXT:    vsll.vx v9, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv2i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v9, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v10, v9, a0
+; CHECK-RV64-NEXT:    vsll.vv v10, v8, v10
+; CHECK-RV64-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-RV64-NEXT:    vand.vx v9, v9, a0
+; CHECK-RV64-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i32> %b.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i32> @llvm.fshl.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b.splat)
+  ret <vscale x 2 x i32> %x
+}
+
+declare <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @vrol_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: vrol_vv_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vand.vx v12, v10, a0
+; CHECK-NEXT:    vsll.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vrol_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-RV32-NEXT:    vsll.vx v10, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v10, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v12, v10, a0
+; CHECK-RV64-NEXT:    vsll.vv v12, v8, v12
+; CHECK-RV64-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-RV64-NEXT:    vand.vx v10, v10, a0
+; CHECK-RV64-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-RV64-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i32> %b.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b.splat)
+  ret <vscale x 4 x i32> %x
+}
+
+declare <vscale x 8 x i32> @llvm.fshl.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @vrol_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
+; CHECK-LABEL: vrol_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vand.vx v16, v12, a0
+; CHECK-NEXT:    vsll.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i32> @llvm.fshl.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vrol_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv8i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
+; CHECK-RV32-NEXT:    vsll.vx v12, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv8i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v12, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v16, v12, a0
+; CHECK-RV64-NEXT:    vsll.vv v16, v8, v16
+; CHECK-RV64-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-RV64-NEXT:    vand.vx v12, v12, a0
+; CHECK-RV64-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-RV64-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i32> %b.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i32> @llvm.fshl.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b.splat)
+  ret <vscale x 8 x i32> %x
+}
+
+declare <vscale x 16 x i32> @llvm.fshl.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @vrol_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
+; CHECK-LABEL: vrol_vv_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vand.vx v24, v16, a0
+; CHECK-NEXT:    vsll.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i32> @llvm.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vrol_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv16i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m8, ta, ma
+; CHECK-RV32-NEXT:    vsll.vx v16, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv16i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v16, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v24, v16, a0
+; CHECK-RV64-NEXT:    vsll.vv v24, v8, v24
+; CHECK-RV64-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-RV64-NEXT:    vand.vx v16, v16, a0
+; CHECK-RV64-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-RV64-NEXT:    vor.vv v8, v24, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vx_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x i32> %b.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i32> @llvm.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b.splat)
+  ret <vscale x 16 x i32> %x
+}
+
+declare <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>)
+
+define <vscale x 1 x i64> @vrol_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) {
+; CHECK-LABEL: vrol_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vand.vx v10, v9, a0
+; CHECK-NEXT:    vsll.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b)
+  ret <vscale x 1 x i64> %x
+}
+
+define <vscale x 1 x i64> @vrol_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v10, v9, a0
+; CHECK-RV32-NEXT:    vsll.vv v10, v8, v10
+; CHECK-RV32-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-RV32-NEXT:    vand.vx v9, v9, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v9
+; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v9, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vrol_vx_nxv1i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v9
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vrol_vx_nxv1i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i64> %b.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b.splat)
+  ret <vscale x 1 x i64> %x
+}
+
+declare <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @vrol_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: vrol_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vand.vx v12, v10, a0
+; CHECK-NEXT:    vsll.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vrol_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v12, v10, a0
+; CHECK-RV32-NEXT:    vsll.vv v12, v8, v12
+; CHECK-RV32-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-RV32-NEXT:    vand.vx v10, v10, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v10, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vrol_vx_nxv2i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v10
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vrol_vx_nxv2i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i64> %b.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b.splat)
+  ret <vscale x 2 x i64> %x
+}
+
+declare <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @vrol_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
+; CHECK-LABEL: vrol_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vand.vx v16, v12, a0
+; CHECK-NEXT:    vsll.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vrol_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v16, v12, a0
+; CHECK-RV32-NEXT:    vsll.vv v16, v8, v16
+; CHECK-RV32-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-RV32-NEXT:    vand.vx v12, v12, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v12, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vrol_vx_nxv4i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v12
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vrol_vx_nxv4i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i64> %b.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b.splat)
+  ret <vscale x 4 x i64> %x
+}
+
+declare <vscale x 8 x i64> @llvm.fshl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @vrol_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
+; CHECK-LABEL: vrol_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vand.vx v24, v16, a0
+; CHECK-NEXT:    vsll.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vrol_vv_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i64> @llvm.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vrol_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vrol_vx_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v24, v16, a0
+; CHECK-RV32-NEXT:    vsll.vv v24, v8, v24
+; CHECK-RV32-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-RV32-NEXT:    vand.vx v16, v16, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-RV32-NEXT:    vor.vv v8, v24, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vrol_vx_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v16, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vrol_vx_nxv8i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v16
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vrol_vx_nxv8i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i64> %b.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i64> @llvm.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b.splat)
+  ret <vscale x 8 x i64> %x
+}
+
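
The vror tests below rely on the same idiom as the vrol ones above: a rotate is
a funnel shift whose two data operands are the same value, so ROTR is
fshr(%a, %a, %amt) and ROTL is fshl(%a, %a, %amt). A minimal standalone sketch
of the idiom (@rotr_example is an illustrative name, not a function from this
patch):

  declare <vscale x 1 x i32> @llvm.fshr.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>)

  ; Both data operands are %a, so this is a rotate right; with
  ; -mattr=+v,+experimental-zvbb it should select a single vror.vv instead of
  ; the vsrl/vsll/vor expansion.
  define <vscale x 1 x i32> @rotr_example(<vscale x 1 x i32> %a, <vscale x 1 x i32> %amt) {
    %r = call <vscale x 1 x i32> @llvm.fshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> %amt)
    ret <vscale x 1 x i32> %r
  }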

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
new file mode 100644
index 00000000000000..2c69b747e50441
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
@@ -0,0 +1,2167 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+
+declare <vscale x 1 x i8> @llvm.fshr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)
+declare <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i8> @vror_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i8> @llvm.fshr.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vror_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i8> %b.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i8> @llvm.fshr.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b.splat)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vror_vi_nxv1i8(<vscale x 1 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i8> @llvm.fshr.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i8> %x
+}
+
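+; rotl by 1 also selects vror.vi, with immediate 63: the rotate amount is taken
+; modulo SEW, and 63 mod SEW is SEW-1 for every supported element width.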
+define <vscale x 1 x i8> @vror_vi_rotl_nxv1i8(<vscale x 1 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i8> %x
+}
+
+declare <vscale x 2 x i8> @llvm.fshr.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>)
+declare <vscale x 2 x i8> @llvm.fshl.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @vror_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i8> @llvm.fshr.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vror_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i8> %b.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i8> @llvm.fshr.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b.splat)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vror_vi_nxv2i8(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i8> @llvm.fshr.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %a, <vscale x 2 x i8> shufflevector(<vscale x 2 x i8> insertelement(<vscale x 2 x i8> poison, i8 1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vror_vi_rotl_nxv2i8(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i8> @llvm.fshl.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %a, <vscale x 2 x i8> shufflevector(<vscale x 2 x i8> insertelement(<vscale x 2 x i8> poison, i8 1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i8> %x
+}
+
+declare <vscale x 4 x i8> @llvm.fshr.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>)
+declare <vscale x 4 x i8> @llvm.fshl.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @vror_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i8> @llvm.fshr.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vror_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i8> %b.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i8> @llvm.fshr.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b.splat)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vror_vi_nxv4i8(<vscale x 4 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i8> @llvm.fshr.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %a, <vscale x 4 x i8> shufflevector(<vscale x 4 x i8> insertelement(<vscale x 4 x i8> poison, i8 1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vror_vi_rotl_nxv4i8(<vscale x 4 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i8> @llvm.fshl.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %a, <vscale x 4 x i8> shufflevector(<vscale x 4 x i8> insertelement(<vscale x 4 x i8> poison, i8 1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i8> %x
+}
+
+declare <vscale x 8 x i8> @llvm.fshr.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>)
+declare <vscale x 8 x i8> @llvm.fshl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @vror_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i8> @llvm.fshr.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vror_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 7
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i8> %b.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i8> @llvm.fshr.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b.splat)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vror_vi_nxv8i8(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i8> @llvm.fshr.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %a, <vscale x 8 x i8> shufflevector(<vscale x 8 x i8> insertelement(<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vror_vi_rotl_nxv8i8(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i8> @llvm.fshl.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %a, <vscale x 8 x i8> shufflevector(<vscale x 8 x i8> insertelement(<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i8> %x
+}
+
+declare <vscale x 16 x i8> @llvm.fshr.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.fshl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @vror_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vand.vi v12, v10, 7
+; CHECK-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i8> @llvm.fshr.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vror_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vand.vi v12, v10, 7
+; CHECK-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x i8> %b.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i8> @llvm.fshr.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b.splat)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vror_vi_nxv16i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vsll.vi v10, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i8> @llvm.fshr.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> shufflevector(<vscale x 16 x i8> insertelement(<vscale x 16 x i8> poison, i8 1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer))
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vror_vi_rotl_nxv16i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vsrl.vi v10, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i8> @llvm.fshl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> shufflevector(<vscale x 16 x i8> insertelement(<vscale x 16 x i8> poison, i8 1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer))
+  ret <vscale x 16 x i8> %x
+}
+
+declare <vscale x 32 x i8> @llvm.fshr.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>)
+declare <vscale x 32 x i8> @llvm.fshl.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @vror_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vand.vi v16, v12, 7
+; CHECK-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i8> @llvm.fshr.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vror_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vand.vi v16, v12, 7
+; CHECK-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 32 x i8> %b.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i8> @llvm.fshr.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b.splat)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vror_vi_nxv32i8(<vscale x 32 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vsll.vi v12, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i8> @llvm.fshr.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %a, <vscale x 32 x i8> shufflevector(<vscale x 32 x i8> insertelement(<vscale x 32 x i8> poison, i8 1, i32 0), <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer))
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vror_vi_rotl_nxv32i8(<vscale x 32 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vsrl.vi v12, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i8> @llvm.fshl.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %a, <vscale x 32 x i8> shufflevector(<vscale x 32 x i8> insertelement(<vscale x 32 x i8> poison, i8 1, i32 0), <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer))
+  ret <vscale x 32 x i8> %x
+}
+
+declare <vscale x 64 x i8> @llvm.fshr.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>)
+declare <vscale x 64 x i8> @llvm.fshl.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @vror_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; CHECK-LABEL: vror_vv_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vand.vi v24, v16, 7
+; CHECK-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 64 x i8> @llvm.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vror_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b) {
+; CHECK-LABEL: vror_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vand.vi v24, v16, 7
+; CHECK-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 7
+; CHECK-NEXT:    vsll.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+  %b.splat = shufflevector <vscale x 64 x i8> %b.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+  %x = call <vscale x 64 x i8> @llvm.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b.splat)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vror_vi_nxv64i8(<vscale x 64 x i8> %a) {
+; CHECK-LABEL: vror_vi_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vsll.vi v16, v8, 7
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 64 x i8> @llvm.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %a, <vscale x 64 x i8> shufflevector(<vscale x 64 x i8> insertelement(<vscale x 64 x i8> poison, i8 1, i32 0), <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer))
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vror_vi_rotl_nxv64i8(<vscale x 64 x i8> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vsrl.vi v16, v8, 7
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 64 x i8> @llvm.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %a, <vscale x 64 x i8> shufflevector(<vscale x 64 x i8> insertelement(<vscale x 64 x i8> poison, i8 1, i32 0), <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer))
+  ret <vscale x 64 x i8> %x
+}
+
+declare <vscale x 1 x i16> @llvm.fshr.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>)
+declare <vscale x 1 x i16> @llvm.fshl.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i16> @vror_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; CHECK-LABEL: vror_vv_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i16> @llvm.fshr.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vror_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b) {
+; CHECK-LABEL: vror_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i16> %b.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i16> @llvm.fshr.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b.splat)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vror_vi_nxv1i16(<vscale x 1 x i16> %a) {
+; CHECK-LABEL: vror_vi_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 15
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i16> @llvm.fshr.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %a, <vscale x 1 x i16> shufflevector(<vscale x 1 x i16> insertelement(<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vror_vi_rotl_nxv1i16(<vscale x 1 x i16> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 15
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i16> @llvm.fshl.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %a, <vscale x 1 x i16> shufflevector(<vscale x 1 x i16> insertelement(<vscale x 1 x i16> poison, i16 1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i16> %x
+}
+
+declare <vscale x 2 x i16> @llvm.fshr.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>)
+declare <vscale x 2 x i16> @llvm.fshl.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @vror_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; CHECK-LABEL: vror_vv_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i16> @llvm.fshr.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vror_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b) {
+; CHECK-LABEL: vror_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i16> %b.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i16> @llvm.fshr.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b.splat)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vror_vi_nxv2i16(<vscale x 2 x i16> %a) {
+; CHECK-LABEL: vror_vi_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 15
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i16> @llvm.fshr.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %a, <vscale x 2 x i16> shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 1, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vror_vi_rotl_nxv2i16(<vscale x 2 x i16> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 15
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i16> @llvm.fshl.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %a, <vscale x 2 x i16> shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 1, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i16> %x
+}
+
+declare <vscale x 4 x i16> @llvm.fshr.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>)
+declare <vscale x 4 x i16> @llvm.fshl.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @vror_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: vror_vv_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i16> @llvm.fshr.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vror_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b) {
+; CHECK-LABEL: vror_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vand.vi v10, v9, 15
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vi v9, v9, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i16> %b.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i16> @llvm.fshr.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b.splat)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vror_vi_nxv4i16(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: vror_vi_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 15
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i16> @llvm.fshr.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %a, <vscale x 4 x i16> shufflevector(<vscale x 4 x i16> insertelement(<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vror_vi_rotl_nxv4i16(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 15
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i16> @llvm.fshl.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %a, <vscale x 4 x i16> shufflevector(<vscale x 4 x i16> insertelement(<vscale x 4 x i16> poison, i16 1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i16> %x
+}
+
+declare <vscale x 8 x i16> @llvm.fshr.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 8 x i16> @llvm.fshl.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @vror_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: vror_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vand.vi v12, v10, 15
+; CHECK-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i16> @llvm.fshr.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vror_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b) {
+; CHECK-LABEL: vror_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vand.vi v12, v10, 15
+; CHECK-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vi v10, v10, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i16> %b.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i16> @llvm.fshr.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b.splat)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vror_vi_nxv8i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: vror_vi_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsll.vi v10, v8, 15
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i16> @llvm.fshr.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vror_vi_rotl_nxv8i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsrl.vi v10, v8, 15
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i16> @llvm.fshl.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i16> %x
+}
+
+declare <vscale x 16 x i16> @llvm.fshr.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>)
+declare <vscale x 16 x i16> @llvm.fshl.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @vror_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; CHECK-LABEL: vror_vv_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vand.vi v16, v12, 15
+; CHECK-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i16> @llvm.fshr.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vror_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b) {
+; CHECK-LABEL: vror_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vand.vi v16, v12, 15
+; CHECK-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vi v12, v12, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x i16> %b.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i16> @llvm.fshr.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b.splat)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vror_vi_nxv16i16(<vscale x 16 x i16> %a) {
+; CHECK-LABEL: vror_vi_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsll.vi v12, v8, 15
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i16> @llvm.fshr.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %a, <vscale x 16 x i16> shufflevector(<vscale x 16 x i16> insertelement(<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer))
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vror_vi_rotl_nxv16i16(<vscale x 16 x i16> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsrl.vi v12, v8, 15
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i16> @llvm.fshl.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %a, <vscale x 16 x i16> shufflevector(<vscale x 16 x i16> insertelement(<vscale x 16 x i16> poison, i16 1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer))
+  ret <vscale x 16 x i16> %x
+}
+
+declare <vscale x 32 x i16> @llvm.fshr.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>)
+declare <vscale x 32 x i16> @llvm.fshl.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @vror_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; CHECK-LABEL: vror_vv_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vand.vi v24, v16, 15
+; CHECK-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i16> @llvm.fshr.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vror_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b) {
+; CHECK-LABEL: vror_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vand.vi v24, v16, 15
+; CHECK-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vi v16, v16, 15
+; CHECK-NEXT:    vsll.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+  %b.splat = shufflevector <vscale x 32 x i16> %b.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i16> @llvm.fshr.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b.splat)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vror_vi_nxv32i16(<vscale x 32 x i16> %a) {
+; CHECK-LABEL: vror_vi_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vsll.vi v16, v8, 15
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i16> @llvm.fshr.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %a, <vscale x 32 x i16> shufflevector(<vscale x 32 x i16> insertelement(<vscale x 32 x i16> poison, i16 1, i32 0), <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer))
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vror_vi_rotl_nxv32i16(<vscale x 32 x i16> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vsrl.vi v16, v8, 15
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 32 x i16> @llvm.fshl.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %a, <vscale x 32 x i16> shufflevector(<vscale x 32 x i16> insertelement(<vscale x 32 x i16> poison, i16 1, i32 0), <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer))
+  ret <vscale x 32 x i16> %x
+}
+
+declare <vscale x 1 x i32> @llvm.fshr.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>)
+declare <vscale x 1 x i32> @llvm.fshl.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>)
+
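+; At e32 the AND mask (31) no longer fits vand.vi's 5-bit signed immediate, so
+; it is materialized with li and applied with vand.vx.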
+define <vscale x 1 x i32> @vror_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) {
+; CHECK-LABEL: vror_vv_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v10, v9, a0
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i32> @llvm.fshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b)
+  ret <vscale x 1 x i32> %x
+}
+
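+; When SEW matches XLEN (rv32 here), the scalar amount can feed
+; vsrl.vx/vsll.vx directly; on rv64 the i32 amount is splatted first and the
+; expansion stays in vector registers.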
+define <vscale x 1 x i32> @vror_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv1i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-RV32-NEXT:    vsrl.vx v9, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv1i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v9, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v10, v9, a0
+; CHECK-RV64-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-RV64-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-RV64-NEXT:    vand.vx v9, v9, a0
+; CHECK-RV64-NEXT:    vsll.vv v8, v8, v9
+; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i32> %b.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i32> @llvm.fshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b.splat)
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vror_vi_nxv1i32(<vscale x 1 x i32> %a) {
+; CHECK-LABEL: vror_vi_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 31
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i32> @llvm.fshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> shufflevector(<vscale x 1 x i32> insertelement(<vscale x 1 x i32> poison, i32 1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vror_vi_rotl_nxv1i32(<vscale x 1 x i32> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 31
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i32> @llvm.fshl.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %a, <vscale x 1 x i32> shufflevector(<vscale x 1 x i32> insertelement(<vscale x 1 x i32> poison, i32 1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i32> %x
+}
+
+declare <vscale x 2 x i32> @llvm.fshr.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>)
+declare <vscale x 2 x i32> @llvm.fshl.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @vror_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: vror_vv_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vand.vx v10, v9, a0
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i32> @llvm.fshr.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vror_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv2i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-RV32-NEXT:    vsrl.vx v9, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv2i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v9, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v10, v9, a0
+; CHECK-RV64-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-RV64-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-RV64-NEXT:    vand.vx v9, v9, a0
+; CHECK-RV64-NEXT:    vsll.vv v8, v8, v9
+; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i32> %b.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i32> @llvm.fshr.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b.splat)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vror_vi_nxv2i32(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: vror_vi_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsll.vi v9, v8, 31
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i32> @llvm.fshr.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i32> shufflevector(<vscale x 2 x i32> insertelement(<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vror_vi_rotl_nxv2i32(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsrl.vi v9, v8, 31
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i32> @llvm.fshl.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %a, <vscale x 2 x i32> shufflevector(<vscale x 2 x i32> insertelement(<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i32> %x
+}
+
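+; The same four patterns repeat below for i32 at LMUL=2, 4 and 8; only
+; the register grouping changes.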
+declare <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @vror_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: vror_vv_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vand.vx v12, v10, a0
+; CHECK-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vror_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-RV32-NEXT:    vsrl.vx v10, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v10, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v12, v10, a0
+; CHECK-RV64-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-RV64-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-RV64-NEXT:    vand.vx v10, v10, a0
+; CHECK-RV64-NEXT:    vsll.vv v8, v8, v10
+; CHECK-RV64-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i32> %b.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b.splat)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vror_vi_nxv4i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vror_vi_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsll.vi v10, v8, 31
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vror_vi_rotl_nxv4i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsrl.vi v10, v8, 31
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i32> %x
+}
+
+declare <vscale x 8 x i32> @llvm.fshr.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>)
+declare <vscale x 8 x i32> @llvm.fshl.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @vror_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
+; CHECK-LABEL: vror_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vand.vx v16, v12, a0
+; CHECK-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i32> @llvm.fshr.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vror_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv8i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
+; CHECK-RV32-NEXT:    vsrl.vx v12, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv8i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v12, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v16, v12, a0
+; CHECK-RV64-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-RV64-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-RV64-NEXT:    vand.vx v12, v12, a0
+; CHECK-RV64-NEXT:    vsll.vv v8, v8, v12
+; CHECK-RV64-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i32> %b.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i32> @llvm.fshr.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b.splat)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vror_vi_nxv8i32(<vscale x 8 x i32> %a) {
+; CHECK-LABEL: vror_vi_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsll.vi v12, v8, 31
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i32> @llvm.fshr.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %a, <vscale x 8 x i32> shufflevector(<vscale x 8 x i32> insertelement(<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vror_vi_rotl_nxv8i32(<vscale x 8 x i32> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsrl.vi v12, v8, 31
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i32> @llvm.fshl.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %a, <vscale x 8 x i32> shufflevector(<vscale x 8 x i32> insertelement(<vscale x 8 x i32> poison, i32 1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i32> %x
+}
+
+declare <vscale x 16 x i32> @llvm.fshr.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>)
+declare <vscale x 16 x i32> @llvm.fshl.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @vror_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
+; CHECK-LABEL: vror_vv_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 31
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vand.vx v24, v16, a0
+; CHECK-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i32> @llvm.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vror_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv16i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    andi a1, a0, 31
+; CHECK-RV32-NEXT:    vsetvli a2, zero, e32, m8, ta, ma
+; CHECK-RV32-NEXT:    vsrl.vx v16, v8, a1
+; CHECK-RV32-NEXT:    neg a0, a0
+; CHECK-RV32-NEXT:    andi a0, a0, 31
+; CHECK-RV32-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv16i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v16, a0
+; CHECK-RV64-NEXT:    li a0, 31
+; CHECK-RV64-NEXT:    vand.vx v24, v16, a0
+; CHECK-RV64-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-RV64-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-RV64-NEXT:    vand.vx v16, v16, a0
+; CHECK-RV64-NEXT:    vsll.vv v8, v8, v16
+; CHECK-RV64-NEXT:    vor.vv v8, v24, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vx_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x i32> %b.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i32> @llvm.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b.splat)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vror_vi_nxv16i32(<vscale x 16 x i32> %a) {
+; CHECK-LABEL: vror_vi_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vsll.vi v16, v8, 31
+; CHECK-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i32> @llvm.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %a, <vscale x 16 x i32> shufflevector(<vscale x 16 x i32> insertelement(<vscale x 16 x i32> poison, i32 1, i32 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer))
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vror_vi_rotl_nxv16i32(<vscale x 16 x i32> %a) {
+; CHECK-LABEL: vror_vi_rotl_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vsrl.vi v16, v8, 31
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 16 x i32> @llvm.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %a, <vscale x 16 x i32> shufflevector(<vscale x 16 x i32> insertelement(<vscale x 16 x i32> poison, i32 1, i32 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer))
+  ret <vscale x 16 x i32> %x
+}
+
+declare <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>)
+declare <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>)
+
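+; SEW=64: the rotate-amount mask becomes 63 instead of 31.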
+define <vscale x 1 x i64> @vror_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) {
+; CHECK-LABEL: vror_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vand.vx v10, v9, a0
+; CHECK-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v9
+; CHECK-NEXT:    vor.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b)
+  ret <vscale x 1 x i64> %x
+}
+
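+; At SEW=64 the scalar amount no longer fits in an RV32 GPR, so the i64
+; is spilled to the stack and splatted with a zero-stride vlse64.v; even
+; the Zvbb path (CHECK-ZVBB32) then falls back to vror.vv. On RV64 a
+; plain vror.vx still suffices (CHECK-ZVBB64).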
+define <vscale x 1 x i64> @vror_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v10, v9, a0
+; CHECK-RV32-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-RV32-NEXT:    vrsub.vi v9, v9, 0
+; CHECK-RV32-NEXT:    vand.vx v9, v9, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v9
+; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v9, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vror_vx_nxv1i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v9
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vror_vx_nxv1i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x i64> %b.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b.splat)
+  ret <vscale x 1 x i64> %x
+}
+
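+; Constant rotate at SEW=64. On RV64 the shift amount 63 does not fit in
+; the 5-bit immediate of vsll.vi, so it is materialized in a0 and shifted
+; with vsll.vx; the RV32 expansion of the splatted i64 constant is more
+; roundabout still.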
+define <vscale x 1 x i64> @vror_vi_nxv1i64(<vscale x 1 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v9, a0
+; CHECK-RV32-NEXT:    vand.vi v9, v9, 1
+; CHECK-RV32-NEXT:    vsrl.vv v9, v8, v9
+; CHECK-RV32-NEXT:    vmv.v.i v10, 1
+; CHECK-RV32-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-RV32-NEXT:    vand.vx v10, v10, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v10
+; CHECK-RV32-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v9, v8, a0
+; CHECK-RV64-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v9
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i64> %x
+}
+
+define <vscale x 1 x i64> @vror_vi_rotl_nxv1i64(<vscale x 1 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_rotl_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v9, a0
+; CHECK-RV32-NEXT:    vand.vi v9, v9, 1
+; CHECK-RV32-NEXT:    vsll.vv v9, v8, v9
+; CHECK-RV32-NEXT:    vmv.v.i v10, 1
+; CHECK-RV32-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-RV32-NEXT:    vand.vx v10, v10, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-RV32-NEXT:    vor.vv v8, v9, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_rotl_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v9, v8, a0
+; CHECK-RV64-NEXT:    vadd.vv v8, v8, v8
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v9
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer))
+  ret <vscale x 1 x i64> %x
+}
+
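+; The i64 patterns above likewise repeat at LMUL=2, 4 and 8 below.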
+declare <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @vror_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: vror_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vand.vx v12, v10, a0
+; CHECK-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v10
+; CHECK-NEXT:    vor.vv v8, v12, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vror_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v12, v10, a0
+; CHECK-RV32-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-RV32-NEXT:    vrsub.vi v10, v10, 0
+; CHECK-RV32-NEXT:    vand.vx v10, v10, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v10
+; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v10, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vror_vx_nxv2i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v10
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vror_vx_nxv2i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x i64> %b.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b.splat)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vror_vi_nxv2i64(<vscale x 2 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v10, a0
+; CHECK-RV32-NEXT:    vand.vi v10, v10, 1
+; CHECK-RV32-NEXT:    vsrl.vv v10, v8, v10
+; CHECK-RV32-NEXT:    vmv.v.i v12, 1
+; CHECK-RV32-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-RV32-NEXT:    vand.vx v12, v12, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v12
+; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v10, v8, a0
+; CHECK-RV64-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v10
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vror_vi_rotl_nxv2i64(<vscale x 2 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_rotl_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v10, a0
+; CHECK-RV32-NEXT:    vand.vi v10, v10, 1
+; CHECK-RV32-NEXT:    vsll.vv v10, v8, v10
+; CHECK-RV32-NEXT:    vmv.v.i v12, 1
+; CHECK-RV32-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-RV32-NEXT:    vand.vx v12, v12, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v12
+; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_rotl_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v10, v8, a0
+; CHECK-RV64-NEXT:    vadd.vv v8, v8, v8
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v10
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer))
+  ret <vscale x 2 x i64> %x
+}
+
+declare <vscale x 4 x i64> @llvm.fshr.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>)
+declare <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @vror_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
+; CHECK-LABEL: vror_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vand.vx v16, v12, a0
+; CHECK-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vor.vv v8, v16, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v12
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i64> @llvm.fshr.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vror_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v16, v12, a0
+; CHECK-RV32-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-RV32-NEXT:    vrsub.vi v12, v12, 0
+; CHECK-RV32-NEXT:    vand.vx v12, v12, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v12
+; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v12, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vror_vx_nxv4i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v12
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vror_vx_nxv4i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x i64> %b.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i64> @llvm.fshr.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b.splat)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vror_vi_nxv4i64(<vscale x 4 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v12, a0
+; CHECK-RV32-NEXT:    vand.vi v12, v12, 1
+; CHECK-RV32-NEXT:    vsrl.vv v12, v8, v12
+; CHECK-RV32-NEXT:    vmv.v.i v16, 1
+; CHECK-RV32-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-RV32-NEXT:    vand.vx v16, v16, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v16
+; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v12, v8, a0
+; CHECK-RV64-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v12
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i64> @llvm.fshr.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> shufflevector(<vscale x 4 x i64> insertelement(<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vror_vi_rotl_nxv4i64(<vscale x 4 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_rotl_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v12, a0
+; CHECK-RV32-NEXT:    vand.vi v12, v12, 1
+; CHECK-RV32-NEXT:    vsll.vv v12, v8, v12
+; CHECK-RV32-NEXT:    vmv.v.i v16, 1
+; CHECK-RV32-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-RV32-NEXT:    vand.vx v16, v16, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v16
+; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_rotl_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v12, v8, a0
+; CHECK-RV64-NEXT:    vadd.vv v8, v8, v8
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v12
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> shufflevector(<vscale x 4 x i64> insertelement(<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer))
+  ret <vscale x 4 x i64> %x
+}
+
+declare <vscale x 8 x i64> @llvm.fshr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>)
+declare <vscale x 8 x i64> @llvm.fshl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @vror_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
+; CHECK-LABEL: vror_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vand.vx v24, v16, a0
+; CHECK-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    vsll.vv v8, v8, v16
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vv_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vv v8, v8, v16
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i64> @llvm.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vror_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
+; CHECK-RV32-LABEL: vror_vx_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vand.vx v24, v16, a0
+; CHECK-RV32-NEXT:    vsrl.vv v24, v8, v24
+; CHECK-RV32-NEXT:    vrsub.vi v16, v16, 0
+; CHECK-RV32-NEXT:    vand.vx v16, v16, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v16
+; CHECK-RV32-NEXT:    vor.vv v8, v24, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vx_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    andi a1, a0, 63
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v16, v8, a1
+; CHECK-RV64-NEXT:    negw a0, a0
+; CHECK-RV64-NEXT:    andi a0, a0, 63
+; CHECK-RV64-NEXT:    vsll.vx v8, v8, a0
+; CHECK-RV64-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vror_vx_nxv8i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v16
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vror_vx_nxv8i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB64-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x i64> %b.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i64> @llvm.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b.splat)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vror_vi_nxv8i64(<vscale x 8 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v16, a0
+; CHECK-RV32-NEXT:    vand.vi v16, v16, 1
+; CHECK-RV32-NEXT:    vsrl.vv v16, v8, v16
+; CHECK-RV32-NEXT:    vmv.v.i v24, 1
+; CHECK-RV32-NEXT:    vrsub.vi v24, v24, 0
+; CHECK-RV32-NEXT:    vand.vx v24, v24, a0
+; CHECK-RV32-NEXT:    vsll.vv v8, v8, v24
+; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vsll.vx v16, v8, a0
+; CHECK-RV64-NEXT:    vsrl.vi v8, v8, 1
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v16
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 1
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i64> @llvm.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> shufflevector(<vscale x 8 x i64> insertelement(<vscale x 8 x i64> poison, i64 1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vror_vi_rotl_nxv8i64(<vscale x 8 x i64> %a) {
+; CHECK-RV32-LABEL: vror_vi_rotl_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a0, 63
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vmv.v.x v16, a0
+; CHECK-RV32-NEXT:    vand.vi v16, v16, 1
+; CHECK-RV32-NEXT:    vsll.vv v16, v8, v16
+; CHECK-RV32-NEXT:    vmv.v.i v24, 1
+; CHECK-RV32-NEXT:    vrsub.vi v24, v24, 0
+; CHECK-RV32-NEXT:    vand.vx v24, v24, a0
+; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v24
+; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vror_vi_rotl_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a0, 63
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vsrl.vx v16, v8, a0
+; CHECK-RV64-NEXT:    vadd.vv v8, v8, v8
+; CHECK-RV64-NEXT:    vor.vv v8, v8, v16
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vror_vi_rotl_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vi v8, v8, 63
+; CHECK-ZVBB-NEXT:    ret
+  %x = call <vscale x 8 x i64> @llvm.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> shufflevector(<vscale x 8 x i64> insertelement(<vscale x 8 x i64> poison, i64 1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer))
+  ret <vscale x 8 x i64> %x
+}
+