[llvm] [RISCV] Prefer vrgatherei16 for shuffles (PR #66291)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 15 15:45:04 PDT 2023


https://github.com/preames updated https://github.com/llvm/llvm-project/pull/66291

From f57a5bdb514fd7fdffb6773ff2a5afe6ac0e5e62 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Wed, 13 Sep 2023 13:26:29 -0700
Subject: [PATCH 1/2] [RISCV] Prefer vrgatherei16 for shuffles

If the data type is larger than e16 and the shuffle requires more than an LMUL1 register class, prefer the use of vrgatherei16 (a minimal sketch of the check appears after this list).  This has three major benefits:
1) Less work is needed to evaluate the index constant for e.g. vid sequences.  Remember that arithmetic generally scales linearly with LMUL.
2) Less register pressure.  In particular, the source and index registers *can* overlap, so using a smaller index register group can significantly help at m8.
3) Smaller constants.  We've got a bunch of tricks for materializing small constants, and if needed, can use an EEW=16 load.
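
As a rough illustration, here is a minimal standalone C++ sketch of the condition this patch adds, assuming fixed-length vectors so the index vector's size in bits is simply EltBits * NumElts.  The helper name shouldUseVRGatherEI16 and its parameter list are hypothetical; the real check lives inline in lowerVECTOR_SHUFFLE in RISCVISelLowering.cpp (see the diff below).

// Hypothetical helper mirroring the new condition; not part of the patch.
// EltBits: width of the unnarrowed index element type (e.g. 32 or 64)
// NumElts: number of elements in the shuffle
// MinVLen: what Subtarget.getRealMinVLen() returns
bool shouldUseVRGatherEI16(unsigned EltBits, unsigned NumElts,
                           unsigned MinVLen) {
  // Already e16 or narrower: nothing to gain.
  if (EltBits <= 16)
    return false;
  // A two-source shuffle can produce indices up to 2*NumElts-1, so
  // 2*NumElts must fit in 16 bits (isUInt<16>(NumElts * 2) upstream).
  if (2u * NumElts > 65535u)
    return false;
  // Only profitable when the unnarrowed index vector occupies more than
  // one vector register, i.e. its LMUL is greater than 1.
  return (EltBits * NumElts) / MinVLen > 1;
}

For the first test change below (a v8f32 gather at a 128-bit minimum VLEN), EltBits=32 and NumElts=8 give a 256-bit index vector, so 256/128 = 2 > 1 and the indices are narrowed: the test's vsetvli switches from e32,m2 to e16,m1.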
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |    9 +
 .../RISCV/rvv/fixed-vectors-fp-buildvec.ll    |   11 +-
 .../RISCV/rvv/fixed-vectors-fp-interleave.ll  |  150 +--
 .../RISCV/rvv/fixed-vectors-fp-shuffles.ll    |  110 +-
 .../RISCV/rvv/fixed-vectors-int-interleave.ll |  150 +--
 .../RISCV/rvv/fixed-vectors-int-shuffles.ll   |   99 +-
 .../rvv/fixed-vectors-interleaved-access.ll   | 1034 +++++++----------
 .../rvv/fixed-vectors-shuffle-reverse.ll      |  464 ++------
 .../RISCV/rvv/fixed-vectors-shuffle-rotate.ll |    8 +-
 .../test/CodeGen/RISCV/rvv/shuffle-reverse.ll |  245 ++--
 .../RISCV/rvv/vector-interleave-fixed.ll      |   12 +-
 11 files changed, 835 insertions(+), 1457 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a470ceae90ce591..961c3942ce8976e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4651,6 +4651,15 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
   }
 
+  // If the mask allows, we can do all the index computation in 16 bits.  This
+  // requires less work and less register pressure at high LMUL, and creates
+  // smaller constants which may be cheaper to materialize.
+  if (IndexVT.getScalarType().bitsGT(MVT::i16) && isUInt<16>(NumElts * 2) &&
+      (IndexVT.getSizeInBits() / Subtarget.getRealMinVLen()) > 1) {
+    GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
+    IndexVT = IndexVT.changeVectorElementType(MVT::i16);
+  }
+
   MVT IndexContainerVT =
       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 19d563c0ecbacc0..a2fde2addc14e66 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -33,16 +33,17 @@ define void @buildvec_no_vid_v4f32(<4 x float>* %x) {
 define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x, <8 x float> %y) optsize {
 ; CHECK-LABEL: hang_when_merging_stores_after_legalization:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    li a0, 7
 ; CHECK-NEXT:    vmul.vx v14, v12, a0
-; CHECK-NEXT:    vrgather.vv v12, v8, v14
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v14
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v14, -14
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v0, 12
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vrgather.vv v12, v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v12, v10, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v12
 ; CHECK-NEXT:    ret
   %z = shufflevector <8 x float> %x, <8 x float> %y, <4 x i32> <i32 0, i32 7, i32 8, i32 15>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index 6fa9cddde622ce5..c1a4aa4b05d4bec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -36,34 +36,20 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
 ; One vXf64 test case to verify that we don't optimize it.
 ; FIXME: Is there better codegen we can do here?
 define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
-; RV32-V128-LABEL: interleave_v2f64:
-; RV32-V128:       # %bb.0:
-; RV32-V128-NEXT:    vmv1r.v v12, v9
-; RV32-V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-V128-NEXT:    vid.v v9
-; RV32-V128-NEXT:    vsrl.vi v14, v9, 1
-; RV32-V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v14
-; RV32-V128-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-V128-NEXT:    vmv.v.i v0, 10
-; RV32-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV32-V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
-; RV32-V128-NEXT:    vmv.v.v v8, v10
-; RV32-V128-NEXT:    ret
-;
-; RV64-V128-LABEL: interleave_v2f64:
-; RV64-V128:       # %bb.0:
-; RV64-V128-NEXT:    vmv1r.v v12, v9
-; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-V128-NEXT:    vid.v v10
-; RV64-V128-NEXT:    vsrl.vi v14, v10, 1
-; RV64-V128-NEXT:    vrgather.vv v10, v8, v14
-; RV64-V128-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-V128-NEXT:    vmv.v.i v0, 10
-; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV64-V128-NEXT:    vrgather.vv v10, v12, v14, v0.t
-; RV64-V128-NEXT:    vmv.v.v v8, v10
-; RV64-V128-NEXT:    ret
+; V128-LABEL: interleave_v2f64:
+; V128:       # %bb.0:
+; V128-NEXT:    vmv1r.v v12, v9
+; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; V128-NEXT:    vid.v v9
+; V128-NEXT:    vsrl.vi v14, v9, 1
+; V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; V128-NEXT:    vrgatherei16.vv v10, v8, v14
+; V128-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; V128-NEXT:    vmv.v.i v0, 10
+; V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
+; V128-NEXT:    vmv.v.v v8, v10
+; V128-NEXT:    ret
 ;
 ; RV32-V512-LABEL: interleave_v2f64:
 ; RV32-V512:       # %bb.0:
@@ -255,56 +241,34 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
 ; RV32-V128-NEXT:    addi sp, sp, -16
 ; RV32-V128-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    li a1, 24
-; RV32-V128-NEXT:    mul a0, a0, a1
+; RV32-V128-NEXT:    slli a0, a0, 2
 ; RV32-V128-NEXT:    sub sp, sp, a0
-; RV32-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 3
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-V128-NEXT:    addi a0, sp, 16
-; RV32-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
 ; RV32-V128-NEXT:    lui a0, %hi(.LCPI10_0)
 ; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI10_0)
 ; RV32-V128-NEXT:    li a1, 32
 ; RV32-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-V128-NEXT:    vle32.v v24, (a0)
+; RV32-V128-NEXT:    vle16.v v4, (a0)
 ; RV32-V128-NEXT:    lui a0, %hi(.LCPI10_1)
 ; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI10_1)
-; RV32-V128-NEXT:    vle32.v v16, (a0)
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 4
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-V128-NEXT:    vle16.v v24, (a0)
+; RV32-V128-NEXT:    addi a0, sp, 16
+; RV32-V128-NEXT:    vs4r.v v24, (a0) # Unknown-size Folded Spill
 ; RV32-V128-NEXT:    lui a0, 699051
 ; RV32-V128-NEXT:    addi a0, a0, -1366
 ; RV32-V128-NEXT:    vmv.s.x v0, a0
-; RV32-V128-NEXT:    vrgather.vv v16, v8, v24
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 4
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 3
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-V128-NEXT:    vrgather.vv v16, v8, v24, v0.t
-; RV32-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-V128-NEXT:    vmv4r.v v24, v8
+; RV32-V128-NEXT:    vrgatherei16.vv v24, v8, v4
 ; RV32-V128-NEXT:    addi a0, sp, 16
-; RV32-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-V128-NEXT:    vwaddu.vv v0, v8, v24
+; RV32-V128-NEXT:    vl4r.v v12, (a0) # Unknown-size Folded Reload
+; RV32-V128-NEXT:    vrgatherei16.vv v24, v16, v12, v0.t
+; RV32-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-V128-NEXT:    vwaddu.vv v0, v8, v16
 ; RV32-V128-NEXT:    li a0, -1
-; RV32-V128-NEXT:    vwmaccu.vx v0, a0, v24
+; RV32-V128-NEXT:    vwmaccu.vx v0, a0, v16
 ; RV32-V128-NEXT:    vmv8r.v v8, v0
+; RV32-V128-NEXT:    vmv8r.v v16, v24
 ; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    li a1, 24
-; RV32-V128-NEXT:    mul a0, a0, a1
+; RV32-V128-NEXT:    slli a0, a0, 2
 ; RV32-V128-NEXT:    add sp, sp, a0
 ; RV32-V128-NEXT:    addi sp, sp, 16
 ; RV32-V128-NEXT:    ret
@@ -314,56 +278,34 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
 ; RV64-V128-NEXT:    addi sp, sp, -16
 ; RV64-V128-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    li a1, 24
-; RV64-V128-NEXT:    mul a0, a0, a1
+; RV64-V128-NEXT:    slli a0, a0, 2
 ; RV64-V128-NEXT:    sub sp, sp, a0
-; RV64-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 3
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV64-V128-NEXT:    addi a0, sp, 16
-; RV64-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
 ; RV64-V128-NEXT:    lui a0, %hi(.LCPI10_0)
 ; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI10_0)
 ; RV64-V128-NEXT:    li a1, 32
 ; RV64-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-V128-NEXT:    vle32.v v24, (a0)
+; RV64-V128-NEXT:    vle16.v v4, (a0)
 ; RV64-V128-NEXT:    lui a0, %hi(.LCPI10_1)
 ; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI10_1)
-; RV64-V128-NEXT:    vle32.v v16, (a0)
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 4
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-V128-NEXT:    vle16.v v24, (a0)
+; RV64-V128-NEXT:    addi a0, sp, 16
+; RV64-V128-NEXT:    vs4r.v v24, (a0) # Unknown-size Folded Spill
 ; RV64-V128-NEXT:    lui a0, 699051
 ; RV64-V128-NEXT:    addiw a0, a0, -1366
 ; RV64-V128-NEXT:    vmv.s.x v0, a0
-; RV64-V128-NEXT:    vrgather.vv v16, v8, v24
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 4
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 3
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-V128-NEXT:    vrgather.vv v16, v8, v24, v0.t
-; RV64-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-V128-NEXT:    vmv4r.v v24, v8
+; RV64-V128-NEXT:    vrgatherei16.vv v24, v8, v4
 ; RV64-V128-NEXT:    addi a0, sp, 16
-; RV64-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-V128-NEXT:    vwaddu.vv v0, v8, v24
+; RV64-V128-NEXT:    vl4r.v v12, (a0) # Unknown-size Folded Reload
+; RV64-V128-NEXT:    vrgatherei16.vv v24, v16, v12, v0.t
+; RV64-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-V128-NEXT:    vwaddu.vv v0, v8, v16
 ; RV64-V128-NEXT:    li a0, -1
-; RV64-V128-NEXT:    vwmaccu.vx v0, a0, v24
+; RV64-V128-NEXT:    vwmaccu.vx v0, a0, v16
 ; RV64-V128-NEXT:    vmv8r.v v8, v0
+; RV64-V128-NEXT:    vmv8r.v v16, v24
 ; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    li a1, 24
-; RV64-V128-NEXT:    mul a0, a0, a1
+; RV64-V128-NEXT:    slli a0, a0, 2
 ; RV64-V128-NEXT:    add sp, sp, a0
 ; RV64-V128-NEXT:    addi sp, sp, 16
 ; RV64-V128-NEXT:    ret
@@ -450,10 +392,12 @@ define <4 x double> @unary_interleave_v4f64(<4 x double> %x) {
 ; RV64-V128:       # %bb.0:
 ; RV64-V128-NEXT:    lui a0, 12304
 ; RV64-V128-NEXT:    addiw a0, a0, 512
-; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-V128-NEXT:    vmv.s.x v10, a0
-; RV64-V128-NEXT:    vsext.vf8 v12, v10
-; RV64-V128-NEXT:    vrgather.vv v10, v8, v12
+; RV64-V128-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-V128-NEXT:    vsext.vf2 v12, v10
+; RV64-V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-V128-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-V128-NEXT:    vmv.v.v v8, v10
 ; RV64-V128-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 8d66248a1e57df4..a7852ea5843d8a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -72,10 +72,12 @@ define <4 x double> @vrgather_permute_shuffle_vu_v4f64(<4 x double> %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, 4096
 ; RV64-NEXT:    addiw a0, a0, 513
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsext.vf8 v12, v10
-; RV64-NEXT:    vrgather.vv v10, v8, v12
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vsext.vf2 v12, v10
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
   %s = shufflevector <4 x double> %x, <4 x double> poison, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
@@ -100,10 +102,12 @@ define <4 x double> @vrgather_permute_shuffle_uv_v4f64(<4 x double> %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, 4096
 ; RV64-NEXT:    addiw a0, a0, 513
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsext.vf8 v12, v10
-; RV64-NEXT:    vrgather.vv v10, v8, v12
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vsext.vf2 v12, v10
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
   %s = shufflevector <4 x double> poison, <4 x double> %x, <4 x i32> <i32 5, i32 6, i32 4, i32 5>
@@ -111,66 +115,37 @@ define <4 x double> @vrgather_permute_shuffle_uv_v4f64(<4 x double> %x) {
 }
 
 define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y) {
-; RV32-LABEL: vrgather_shuffle_vv_v4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a0, %hi(.LCPI6_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI6_0)
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vle16.v v14, (a0)
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v14
-; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-NEXT:    vmv.v.i v0, 8
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV32-NEXT:    vrgather.vi v12, v10, 1, v0.t
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vrgather_shuffle_vv_v4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI6_0)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI6_0)
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vle64.v v14, (a0)
-; RV64-NEXT:    vrgather.vv v12, v8, v14
-; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT:    vmv.v.i v0, 8
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV64-NEXT:    vrgather.vi v12, v10, 1, v0.t
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vrgather_shuffle_vv_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle16.v v14, (a0)
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v14
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.i v0, 8
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vrgather.vi v12, v10, 1, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %s = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 1, i32 2, i32 0, i32 5>
   ret <4 x double> %s
 }
 
 define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
-; RV32-LABEL: vrgather_shuffle_xv_v4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT:    vid.v v12
-; RV32-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI7_0)
-; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vrsub.vi v12, v12, 4
-; RV32-NEXT:    vmv.v.i v0, 12
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32-NEXT:    vrgatherei16.vv v10, v8, v12, v0.t
-; RV32-NEXT:    vmv.v.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vrgather_shuffle_xv_v4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vid.v v10
-; RV64-NEXT:    vrsub.vi v12, v10, 4
-; RV64-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI7_0)
-; RV64-NEXT:    vlse64.v v10, (a0), zero
-; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT:    vmv.v.i v0, 12
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV64-NEXT:    vrgather.vv v10, v8, v12, v0.t
-; RV64-NEXT:    vmv.v.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: vrgather_shuffle_xv_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI7_0)
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-NEXT:    vrsub.vi v12, v12, 4
+; CHECK-NEXT:    vmv.v.i v0, 12
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
   ret <4 x double> %s
 }
@@ -193,17 +168,16 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
 ;
 ; RV64-LABEL: vrgather_shuffle_vx_v4f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vid.v v10
-; RV64-NEXT:    li a0, 3
-; RV64-NEXT:    vmul.vx v12, v10, a0
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vid.v v12
 ; RV64-NEXT:    lui a0, %hi(.LCPI8_0)
 ; RV64-NEXT:    addi a0, a0, %lo(.LCPI8_0)
 ; RV64-NEXT:    vlse64.v v10, (a0), zero
-; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT:    li a0, 3
+; RV64-NEXT:    vmul.vx v12, v12, a0
 ; RV64-NEXT:    vmv.v.i v0, 3
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV64-NEXT:    vrgather.vv v10, v8, v12, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT:    vrgatherei16.vv v10, v8, v12, v0.t
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
   %s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index f9a64498afacc10..83e64651c5c6313 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -49,34 +49,20 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
 ; One vXi64 test case to verify that we don't optimize it.
 ; FIXME: Is there better codegen we can do here?
 define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
-; RV32-V128-LABEL: interleave_v2i64:
-; RV32-V128:       # %bb.0:
-; RV32-V128-NEXT:    vmv1r.v v12, v9
-; RV32-V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-V128-NEXT:    vid.v v9
-; RV32-V128-NEXT:    vsrl.vi v14, v9, 1
-; RV32-V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v14
-; RV32-V128-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-V128-NEXT:    vmv.v.i v0, 10
-; RV32-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV32-V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
-; RV32-V128-NEXT:    vmv.v.v v8, v10
-; RV32-V128-NEXT:    ret
-;
-; RV64-V128-LABEL: interleave_v2i64:
-; RV64-V128:       # %bb.0:
-; RV64-V128-NEXT:    vmv1r.v v12, v9
-; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-V128-NEXT:    vid.v v10
-; RV64-V128-NEXT:    vsrl.vi v14, v10, 1
-; RV64-V128-NEXT:    vrgather.vv v10, v8, v14
-; RV64-V128-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-V128-NEXT:    vmv.v.i v0, 10
-; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; RV64-V128-NEXT:    vrgather.vv v10, v12, v14, v0.t
-; RV64-V128-NEXT:    vmv.v.v v8, v10
-; RV64-V128-NEXT:    ret
+; V128-LABEL: interleave_v2i64:
+; V128:       # %bb.0:
+; V128-NEXT:    vmv1r.v v12, v9
+; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; V128-NEXT:    vid.v v9
+; V128-NEXT:    vsrl.vi v14, v9, 1
+; V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; V128-NEXT:    vrgatherei16.vv v10, v8, v14
+; V128-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; V128-NEXT:    vmv.v.i v0, 10
+; V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
+; V128-NEXT:    vmv.v.v v8, v10
+; V128-NEXT:    ret
 ;
 ; RV32-V512-LABEL: interleave_v2i64:
 ; RV32-V512:       # %bb.0:
@@ -414,56 +400,34 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV32-V128-NEXT:    addi sp, sp, -16
 ; RV32-V128-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    li a1, 24
-; RV32-V128-NEXT:    mul a0, a0, a1
+; RV32-V128-NEXT:    slli a0, a0, 2
 ; RV32-V128-NEXT:    sub sp, sp, a0
-; RV32-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 3
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-V128-NEXT:    addi a0, sp, 16
-; RV32-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
 ; RV32-V128-NEXT:    lui a0, %hi(.LCPI17_0)
 ; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI17_0)
 ; RV32-V128-NEXT:    li a1, 32
 ; RV32-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-V128-NEXT:    vle32.v v24, (a0)
+; RV32-V128-NEXT:    vle16.v v4, (a0)
 ; RV32-V128-NEXT:    lui a0, %hi(.LCPI17_1)
 ; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI17_1)
-; RV32-V128-NEXT:    vle32.v v16, (a0)
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 4
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-V128-NEXT:    vle16.v v24, (a0)
+; RV32-V128-NEXT:    addi a0, sp, 16
+; RV32-V128-NEXT:    vs4r.v v24, (a0) # Unknown-size Folded Spill
 ; RV32-V128-NEXT:    lui a0, 699051
 ; RV32-V128-NEXT:    addi a0, a0, -1366
 ; RV32-V128-NEXT:    vmv.s.x v0, a0
-; RV32-V128-NEXT:    vrgather.vv v16, v8, v24
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 4
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    slli a0, a0, 3
-; RV32-V128-NEXT:    add a0, sp, a0
-; RV32-V128-NEXT:    addi a0, a0, 16
-; RV32-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-V128-NEXT:    vrgather.vv v16, v8, v24, v0.t
-; RV32-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-V128-NEXT:    vmv4r.v v24, v8
+; RV32-V128-NEXT:    vrgatherei16.vv v24, v8, v4
 ; RV32-V128-NEXT:    addi a0, sp, 16
-; RV32-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-V128-NEXT:    vwaddu.vv v0, v8, v24
+; RV32-V128-NEXT:    vl4r.v v12, (a0) # Unknown-size Folded Reload
+; RV32-V128-NEXT:    vrgatherei16.vv v24, v16, v12, v0.t
+; RV32-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-V128-NEXT:    vwaddu.vv v0, v8, v16
 ; RV32-V128-NEXT:    li a0, -1
-; RV32-V128-NEXT:    vwmaccu.vx v0, a0, v24
+; RV32-V128-NEXT:    vwmaccu.vx v0, a0, v16
 ; RV32-V128-NEXT:    vmv8r.v v8, v0
+; RV32-V128-NEXT:    vmv8r.v v16, v24
 ; RV32-V128-NEXT:    csrr a0, vlenb
-; RV32-V128-NEXT:    li a1, 24
-; RV32-V128-NEXT:    mul a0, a0, a1
+; RV32-V128-NEXT:    slli a0, a0, 2
 ; RV32-V128-NEXT:    add sp, sp, a0
 ; RV32-V128-NEXT:    addi sp, sp, 16
 ; RV32-V128-NEXT:    ret
@@ -473,56 +437,34 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV64-V128-NEXT:    addi sp, sp, -16
 ; RV64-V128-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    li a1, 24
-; RV64-V128-NEXT:    mul a0, a0, a1
+; RV64-V128-NEXT:    slli a0, a0, 2
 ; RV64-V128-NEXT:    sub sp, sp, a0
-; RV64-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 3
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV64-V128-NEXT:    addi a0, sp, 16
-; RV64-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
 ; RV64-V128-NEXT:    lui a0, %hi(.LCPI17_0)
 ; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI17_0)
 ; RV64-V128-NEXT:    li a1, 32
 ; RV64-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-V128-NEXT:    vle32.v v24, (a0)
+; RV64-V128-NEXT:    vle16.v v4, (a0)
 ; RV64-V128-NEXT:    lui a0, %hi(.LCPI17_1)
 ; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI17_1)
-; RV64-V128-NEXT:    vle32.v v16, (a0)
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 4
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-V128-NEXT:    vle16.v v24, (a0)
+; RV64-V128-NEXT:    addi a0, sp, 16
+; RV64-V128-NEXT:    vs4r.v v24, (a0) # Unknown-size Folded Spill
 ; RV64-V128-NEXT:    lui a0, 699051
 ; RV64-V128-NEXT:    addiw a0, a0, -1366
 ; RV64-V128-NEXT:    vmv.s.x v0, a0
-; RV64-V128-NEXT:    vrgather.vv v16, v8, v24
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 4
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    slli a0, a0, 3
-; RV64-V128-NEXT:    add a0, sp, a0
-; RV64-V128-NEXT:    addi a0, a0, 16
-; RV64-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-V128-NEXT:    vrgather.vv v16, v8, v24, v0.t
-; RV64-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-V128-NEXT:    vmv4r.v v24, v8
+; RV64-V128-NEXT:    vrgatherei16.vv v24, v8, v4
 ; RV64-V128-NEXT:    addi a0, sp, 16
-; RV64-V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-V128-NEXT:    vwaddu.vv v0, v8, v24
+; RV64-V128-NEXT:    vl4r.v v12, (a0) # Unknown-size Folded Reload
+; RV64-V128-NEXT:    vrgatherei16.vv v24, v16, v12, v0.t
+; RV64-V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-V128-NEXT:    vwaddu.vv v0, v8, v16
 ; RV64-V128-NEXT:    li a0, -1
-; RV64-V128-NEXT:    vwmaccu.vx v0, a0, v24
+; RV64-V128-NEXT:    vwmaccu.vx v0, a0, v16
 ; RV64-V128-NEXT:    vmv8r.v v8, v0
+; RV64-V128-NEXT:    vmv8r.v v16, v24
 ; RV64-V128-NEXT:    csrr a0, vlenb
-; RV64-V128-NEXT:    li a1, 24
-; RV64-V128-NEXT:    mul a0, a0, a1
+; RV64-V128-NEXT:    slli a0, a0, 2
 ; RV64-V128-NEXT:    add sp, sp, a0
 ; RV64-V128-NEXT:    addi sp, sp, 16
 ; RV64-V128-NEXT:    ret
@@ -683,10 +625,12 @@ define <4 x i64> @unary_interleave_v4i64(<4 x i64> %x) {
 ; RV64-V128:       # %bb.0:
 ; RV64-V128-NEXT:    lui a0, 12304
 ; RV64-V128-NEXT:    addiw a0, a0, 512
-; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-V128-NEXT:    vmv.s.x v10, a0
-; RV64-V128-NEXT:    vsext.vf8 v12, v10
-; RV64-V128-NEXT:    vrgather.vv v10, v8, v12
+; RV64-V128-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-V128-NEXT:    vsext.vf2 v12, v10
+; RV64-V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-V128-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-V128-NEXT:    vmv.v.v v8, v10
 ; RV64-V128-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index b4f3a0fb197a53b..78cafbb84bb926e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -154,49 +154,29 @@ define <4 x i16> @vrgather_shuffle_vx_v4i16(<4 x i16> %x) {
 }
 
 define <8 x i64> @vrgather_permute_shuffle_vu_v8i64(<8 x i64> %x) {
-; RV32-LABEL: vrgather_permute_shuffle_vu_v8i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a0, %hi(.LCPI9_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI9_0)
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vrgather_permute_shuffle_vu_v8i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI9_0)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI9_0)
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vle64.v v16, (a0)
-; RV64-NEXT:    vrgather.vv v12, v8, v16
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vrgather_permute_shuffle_vu_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_0)
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %s = shufflevector <8 x i64> %x, <8 x i64> poison, <8 x i32> <i32 1, i32 2, i32 0, i32 1, i32 7, i32 6, i32 0, i32 1>
   ret <8 x i64> %s
 }
 
 define <8 x i64> @vrgather_permute_shuffle_uv_v8i64(<8 x i64> %x) {
-; RV32-LABEL: vrgather_permute_shuffle_uv_v8i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a0, %hi(.LCPI10_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI10_0)
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vrgather_permute_shuffle_uv_v8i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI10_0)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI10_0)
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vle64.v v16, (a0)
-; RV64-NEXT:    vrgather.vv v12, v8, v16
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: vrgather_permute_shuffle_uv_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %s = shufflevector <8 x i64> poison, <8 x i64> %x, <8 x i32> <i32 9, i32 10, i32 8, i32 9, i32 15, i32 8, i32 8, i32 11>
   ret <8 x i64> %s
 }
@@ -221,18 +201,26 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ;
 ; RV64-LABEL: vrgather_shuffle_vv_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI11_0)
-; RV64-NEXT:    vle64.v v20, (a0)
-; RV64-NEXT:    vmv.v.i v24, 2
-; RV64-NEXT:    vrgather.vv v16, v8, v20
+; RV64-NEXT:    vmv4r.v v16, v8
+; RV64-NEXT:    lui a0, 327683
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a0, a0, 17
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v20, a0
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v20
 ; RV64-NEXT:    li a0, 164
 ; RV64-NEXT:    vmv.s.x v0, a0
-; RV64-NEXT:    li a0, 5
-; RV64-NEXT:    vslide1down.vx v8, v24, a0
-; RV64-NEXT:    vrgather.vv v16, v12, v8, v0.t
-; RV64-NEXT:    vmv.v.v v8, v16
+; RV64-NEXT:    lui a0, 163841
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a0, a0, 17
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v16, a0
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
 ; RV64-NEXT:    ret
   %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> <i32 1, i32 2, i32 10, i32 5, i32 1, i32 10, i32 3, i32 13>
   ret <8 x i64> %s
@@ -258,14 +246,15 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
 ;
 ; RV64-LABEL: vrgather_shuffle_xv_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI12_0)
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    vle64.v v16, (a0)
 ; RV64-NEXT:    li a0, 113
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v0, a0
+; RV64-NEXT:    lui a0, 98305
+; RV64-NEXT:    slli a0, a0, 6
+; RV64-NEXT:    vmv.v.x v16, a0
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vmv.v.i v12, -1
-; RV64-NEXT:    vrgather.vv v12, v8, v16, v0.t
+; RV64-NEXT:    vrgatherei16.vv v12, v8, v16, v0.t
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
   %s = shufflevector <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> %x, <8 x i32> <i32 8, i32 3, i32 6, i32 5, i32 8, i32 12, i32 14, i32 3>
@@ -295,11 +284,11 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
 ; RV64-NEXT:    lui a0, %hi(.LCPI13_0)
 ; RV64-NEXT:    addi a0, a0, %lo(.LCPI13_0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    vle64.v v16, (a0)
+; RV64-NEXT:    vle16.v v16, (a0)
 ; RV64-NEXT:    li a0, 115
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vmv.v.i v12, 5
-; RV64-NEXT:    vrgather.vv v12, v8, v16, v0.t
+; RV64-NEXT:    vrgatherei16.vv v12, v8, v16, v0.t
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
   %s = shufflevector <8 x i64> %x, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i32> <i32 0, i32 3, i32 10, i32 9, i32 4, i32 1, i32 7, i32 14>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index c51c0c2bf7e6fe0..ed8ebe83b89afcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -131,333 +131,312 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    li a3, 82
+; RV32-NEXT:    li a3, 62
 ; RV32-NEXT:    mul a2, a2, a3
 ; RV32-NEXT:    sub sp, sp, a2
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd2, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 82 * vlenb
-; RV32-NEXT:    addi a3, a1, 256
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x3e, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 62 * vlenb
+; RV32-NEXT:    addi a3, a1, 128
+; RV32-NEXT:    addi a4, a1, 256
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vle32.v v16, (a3)
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    li a4, 57
-; RV32-NEXT:    mul a3, a3, a4
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vid.v v24
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    li a4, 37
-; RV32-NEXT:    mul a3, a3, a4
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs4r.v v24, (a3) # Unknown-size Folded Spill
-; RV32-NEXT:    vadd.vi v8, v24, -4
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    slli a4, a3, 4
-; RV32-NEXT:    add a3, a4, a3
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs4r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT:    vrgather.vv v12, v16, v8
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    li a4, 45
-; RV32-NEXT:    mul a3, a3, a4
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs4r.v v12, (a3) # Unknown-size Folded Spill
-; RV32-NEXT:    vadd.vi v8, v24, -10
-; RV32-NEXT:    lui a3, 12
-; RV32-NEXT:    vmv.s.x v0, a3
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    slli a4, a3, 5
-; RV32-NEXT:    add a3, a4, a3
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs1r.v v0, (a3) # Unknown-size Folded Spill
+; RV32-NEXT:    vle32.v v16, (a4)
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    li a5, 38
+; RV32-NEXT:    mul a4, a4, a5
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT:    vid.v v10
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    slli a4, a4, 3
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs2r.v v10, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vadd.vi v8, v10, -4
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    li a5, 12
+; RV32-NEXT:    mul a4, a4, a5
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs2r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    li a5, 18
+; RV32-NEXT:    mul a4, a4, a5
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs4r.v v12, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT:    vadd.vi v8, v10, -10
+; RV32-NEXT:    lui a4, 12
+; RV32-NEXT:    vmv.s.x v0, a4
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    li a5, 22
+; RV32-NEXT:    mul a4, a4, a5
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs1r.v v0, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT:    vslidedown.vi v16, v16, 16
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    slli a4, a3, 6
-; RV32-NEXT:    add a3, a4, a3
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    li a5, 46
+; RV32-NEXT:    mul a4, a4, a5
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs8r.v v16, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT:    vrgather.vv v12, v16, v8, v0.t
-; RV32-NEXT:    csrr a3, vlenb
-; RV32-NEXT:    li a4, 41
-; RV32-NEXT:    mul a3, a3, a4
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 16
-; RV32-NEXT:    vs4r.v v12, (a3) # Unknown-size Folded Spill
-; RV32-NEXT:    addi a3, a1, 128
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
+; RV32-NEXT:    csrr a4, vlenb
+; RV32-NEXT:    li a5, 26
+; RV32-NEXT:    mul a4, a4, a5
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    addi a4, a4, 16
+; RV32-NEXT:    vs4r.v v12, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT:    lui a4, %hi(.LCPI6_0)
 ; RV32-NEXT:    addi a4, a4, %lo(.LCPI6_0)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    lui a5, %hi(.LCPI6_1)
 ; RV32-NEXT:    addi a5, a5, %lo(.LCPI6_1)
 ; RV32-NEXT:    lui a6, 1
-; RV32-NEXT:    vle32.v v8, (a4)
+; RV32-NEXT:    vle16.v v8, (a4)
+; RV32-NEXT:    addi a4, sp, 16
+; RV32-NEXT:    vs4r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vle16.v v8, (a5)
 ; RV32-NEXT:    csrr a4, vlenb
-; RV32-NEXT:    slli a7, a4, 3
-; RV32-NEXT:    add a4, a7, a4
+; RV32-NEXT:    slli a4, a4, 2
 ; RV32-NEXT:    add a4, sp, a4
 ; RV32-NEXT:    addi a4, a4, 16
-; RV32-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT:    vs4r.v v8, (a4) # Unknown-size Folded Spill
 ; RV32-NEXT:    vle32.v v16, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a4, 73
+; RV32-NEXT:    li a4, 54
 ; RV32-NEXT:    mul a1, a1, a4
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    vle32.v v8, (a5)
+; RV32-NEXT:    vle32.v v24, (a3)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a4, 25
-; RV32-NEXT:    mul a1, a1, a4
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    vle32.v v8, (a3)
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 49
+; RV32-NEXT:    li a3, 30
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    addi a1, a6, -64
-; RV32-NEXT:    vmv.s.x v24, a1
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a1, a1, 3
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs1r.v v24, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 3
-; RV32-NEXT:    add a1, a3, a1
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v24, v16, v0
+; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    li a3, 14
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl1r.v v2, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vmv1r.v v0, v2
+; RV32-NEXT:    vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl4r.v v4, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v16, v4
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 25
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v24, v8, v16, v0.t
+; RV32-NEXT:    vl4r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v16, v0.t
 ; RV32-NEXT:    vsetivli zero, 12, e32, m4, tu, ma
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 41
+; RV32-NEXT:    li a3, 26
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vmv.v.v v8, v24
+; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vmv.v.v v12, v8
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 41
+; RV32-NEXT:    li a3, 26
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 37
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vadd.vi v16, v12, -2
+; RV32-NEXT:    vl2r.v v10, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vadd.vi v8, v10, -2
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 57
+; RV32-NEXT:    li a3, 38
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v24, v16
-; RV32-NEXT:    vadd.vi v16, v12, -8
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT:    vadd.vi v8, v10, -8
+; RV32-NEXT:    vmv2r.v v22, v10
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 5
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 22
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vmv1r.v v1, v0
+; RV32-NEXT:    vl1r.v v20, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vmv1r.v v0, v20
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 6
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 46
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v24, v16, v0.t
-; RV32-NEXT:    vmv.v.v v4, v8
+; RV32-NEXT:    vrgatherei16.vv v12, v24, v8, v0.t
+; RV32-NEXT:    vmv.v.v v16, v12
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_2)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_2)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    lui a3, %hi(.LCPI6_3)
 ; RV32-NEXT:    addi a3, a3, %lo(.LCPI6_3)
-; RV32-NEXT:    vle32.v v16, (a1)
-; RV32-NEXT:    vle32.v v8, (a3)
+; RV32-NEXT:    vle16.v v24, (a1)
+; RV32-NEXT:    vle16.v v8, (a3)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 25
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 73
+; RV32-NEXT:    li a3, 54
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v24, v8, v16
-; RV32-NEXT:    vmv1r.v v0, v2
+; RV32-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v0, v24
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 49
+; RV32-NEXT:    li a3, 14
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 25
+; RV32-NEXT:    li a3, 30
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v24, v8, v16, v0.t
-; RV32-NEXT:    vsetivli zero, 12, e32, m4, tu, ma
-; RV32-NEXT:    vmv.v.v v4, v24
+; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 25
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs4r.v v4, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    lui a1, %hi(.LCPI6_4)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_4)
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT:    vle32.v v8, (a1)
+; RV32-NEXT:    vl4r.v v4, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v4, v0.t
+; RV32-NEXT:    vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT:    vmv.v.v v16, v8
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 57
+; RV32-NEXT:    li a3, 14
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v12, v16, v8
+; RV32-NEXT:    vs4r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    lui a1, %hi(.LCPI6_4)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_4)
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle16.v v8, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 37
+; RV32-NEXT:    li a3, 38
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vadd.vi v8, v8, -6
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 3
-; RV32-NEXT:    add a1, a3, a1
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    vmv1r.v v0, v1
+; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v12, v24, v8
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT:    vadd.vi v8, v22, -6
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 6
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v12, v16, v8, v0.t
+; RV32-NEXT:    vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vmv1r.v v0, v20
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 37
+; RV32-NEXT:    li a3, 46
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
+; RV32-NEXT:    vmv.v.v v16, v12
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_5)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_5)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    lui a3, %hi(.LCPI6_6)
 ; RV32-NEXT:    addi a3, a3, %lo(.LCPI6_6)
-; RV32-NEXT:    vle32.v v16, (a1)
-; RV32-NEXT:    vle32.v v8, (a3)
-; RV32-NEXT:    addi a1, sp, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT:    li a1, 960
-; RV32-NEXT:    vmv.s.x v0, a1
+; RV32-NEXT:    vle16.v v20, (a1)
+; RV32-NEXT:    vle16.v v8, (a3)
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    li a1, 960
+; RV32-NEXT:    vmv.s.x v24, a1
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs1r.v v24, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 73
+; RV32-NEXT:    li a3, 54
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v24, v16
+; RV32-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v0, v20
+; RV32-NEXT:    vmv1r.v v0, v24
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 49
+; RV32-NEXT:    li a3, 30
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    addi a1, sp, 16
 ; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 37
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vmv.v.v v12, v8
+; RV32-NEXT:    vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v20, v0.t
+; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
+; RV32-NEXT:    vmv.v.v v16, v8
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 37
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vs4r.v v16, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_7)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_7)
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT:    vle32.v v8, (a1)
+; RV32-NEXT:    vle16.v v8, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 57
+; RV32-NEXT:    li a3, 38
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v12, v24, v8
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 5
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 22
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 4
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 12
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vl2r.v v8, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 6
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 46
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v12, v24, v8, v0.t
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 5
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 22
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
@@ -466,216 +445,194 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    lui a3, %hi(.LCPI6_9)
 ; RV32-NEXT:    addi a3, a3, %lo(.LCPI6_9)
-; RV32-NEXT:    vle32.v v24, (a1)
-; RV32-NEXT:    vle32.v v8, (a3)
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 4
-; RV32-NEXT:    add a1, a3, a1
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vle16.v v16, (a1)
+; RV32-NEXT:    vle16.v v20, (a3)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 73
+; RV32-NEXT:    li a3, 54
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v0, v24
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a1, a1, 3
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vrgatherei16.vv v8, v0, v16
+; RV32-NEXT:    addi a1, sp, 16
 ; RV32-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 4
-; RV32-NEXT:    add a1, a3, a1
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v20, v0.t
 ; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 5
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 22
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    vmv.v.v v12, v8
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 5
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 22
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_10)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_10)
-; RV32-NEXT:    vle32.v v8, (a1)
+; RV32-NEXT:    vle16.v v12, (a1)
 ; RV32-NEXT:    lui a1, 15
-; RV32-NEXT:    vmv.s.x v1, a1
+; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 57
+; RV32-NEXT:    li a3, 12
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vs1r.v v0, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 3
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 38
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v4, v16, v12
-; RV32-NEXT:    vmv1r.v v0, v1
+; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 6
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v4, v16, v8, v0.t
+; RV32-NEXT:    vl2r.v v14, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v14
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    li a3, 46
+; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v12, v0.t
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_11)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_11)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    lui a3, %hi(.LCPI6_12)
 ; RV32-NEXT:    addi a3, a3, %lo(.LCPI6_12)
-; RV32-NEXT:    vle32.v v24, (a1)
-; RV32-NEXT:    vle32.v v8, (a3)
+; RV32-NEXT:    vle16.v v12, (a1)
+; RV32-NEXT:    vle16.v v16, (a3)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 57
-; RV32-NEXT:    mul a1, a1, a3
+; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vs4r.v v16, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    li a1, 1008
-; RV32-NEXT:    vmv.s.x v2, a1
+; RV32-NEXT:    vmv.s.x v16, a1
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 73
+; RV32-NEXT:    li a3, 38
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v16, v24
-; RV32-NEXT:    vmv1r.v v0, v2
+; RV32-NEXT:    vs1r.v v16, (a1) # Unknown-size Folded Spill
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 49
+; RV32-NEXT:    li a3, 54
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v24, v0, v12
+; RV32-NEXT:    vmv1r.v v0, v16
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 57
+; RV32-NEXT:    li a3, 30
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v24, v16, v12, v0.t
 ; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT:    vmv.v.v v4, v8
+; RV32-NEXT:    vmv.v.v v8, v24
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_13)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_13)
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT:    vle32.v v8, (a1)
-; RV32-NEXT:    vmv1r.v v0, v1
+; RV32-NEXT:    vle16.v v16, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 45
+; RV32-NEXT:    li a3, 12
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a3, a1, 6
-; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    li a3, 18
+; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v12, v16, v8, v0.t
+; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a3, 45
+; RV32-NEXT:    li a3, 46
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v12, v24, v16, v0.t
 ; RV32-NEXT:    lui a1, %hi(.LCPI6_14)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI6_14)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    lui a2, %hi(.LCPI6_15)
 ; RV32-NEXT:    addi a2, a2, %lo(.LCPI6_15)
-; RV32-NEXT:    vle32.v v16, (a1)
-; RV32-NEXT:    vle32.v v8, (a2)
+; RV32-NEXT:    vle16.v v0, (a1)
+; RV32-NEXT:    vle16.v v4, (a2)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a2, a1, 6
-; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    li a2, 54
+; RV32-NEXT:    mul a1, a1, a2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vrgatherei16.vv v24, v16, v0
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a2, 73
+; RV32-NEXT:    li a2, 38
 ; RV32-NEXT:    mul a1, a1, a2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v24, v16
-; RV32-NEXT:    vmv1r.v v0, v2
+; RV32-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a2, 49
+; RV32-NEXT:    li a2, 30
 ; RV32-NEXT:    mul a1, a1, a2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a2, a1, 6
-; RV32-NEXT:    add a1, a2, a1
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT:    vrgatherei16.vv v24, v16, v4, v0.t
 ; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a2, 45
-; RV32-NEXT:    mul a1, a1, a2
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 16
-; RV32-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT:    vmv.v.v v12, v8
+; RV32-NEXT:    vmv.v.v v12, v24
 ; RV32-NEXT:    addi a1, a0, 320
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-NEXT:    vse32.v v12, (a1)
 ; RV32-NEXT:    addi a1, a0, 256
-; RV32-NEXT:    vse32.v v4, (a1)
+; RV32-NEXT:    vse32.v v8, (a1)
 ; RV32-NEXT:    addi a1, a0, 192
 ; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    slli a3, a2, 5
-; RV32-NEXT:    add a2, a3, a2
+; RV32-NEXT:    li a3, 22
+; RV32-NEXT:    mul a2, a2, a3
 ; RV32-NEXT:    add a2, sp, a2
 ; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV32-NEXT:    vse32.v v8, (a1)
 ; RV32-NEXT:    addi a1, a0, 128
 ; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    li a3, 37
-; RV32-NEXT:    mul a2, a2, a3
+; RV32-NEXT:    slli a2, a2, 3
 ; RV32-NEXT:    add a2, sp, a2
 ; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV32-NEXT:    vse32.v v8, (a1)
 ; RV32-NEXT:    addi a1, a0, 64
 ; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    li a3, 25
+; RV32-NEXT:    li a3, 14
 ; RV32-NEXT:    mul a2, a2, a3
 ; RV32-NEXT:    add a2, sp, a2
 ; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV32-NEXT:    vse32.v v8, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    li a2, 41
+; RV32-NEXT:    li a2, 26
 ; RV32-NEXT:    mul a1, a1, a2
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    li a1, 82
+; RV32-NEXT:    li a1, 62
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
 ; RV32-NEXT:    addi sp, sp, 16
@@ -686,15 +643,15 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    li a3, 84
+; RV64-NEXT:    li a3, 52
 ; RV64-NEXT:    mul a2, a2, a3
 ; RV64-NEXT:    sub sp, sp, a2
-; RV64-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd4, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 84 * vlenb
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x34, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 52 * vlenb
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    addi a2, a1, 256
 ; RV64-NEXT:    vle64.v v16, (a2)
 ; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    li a3, 52
+; RV64-NEXT:    li a3, 27
 ; RV64-NEXT:    mul a2, a2, a3
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 16
@@ -702,14 +659,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    addi a2, a1, 128
 ; RV64-NEXT:    vle64.v v8, (a2)
 ; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    li a3, 60
+; RV64-NEXT:    li a3, 35
 ; RV64-NEXT:    mul a2, a2, a3
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 16
 ; RV64-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
 ; RV64-NEXT:    vle64.v v8, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
+; RV64-NEXT:    li a2, 43
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
@@ -717,507 +674,342 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vrgather.vi v8, v16, 4
 ; RV64-NEXT:    li a1, 128
-; RV64-NEXT:    vmv.s.x v0, a1
+; RV64-NEXT:    vmv.s.x v4, a1
 ; RV64-NEXT:    vsetivli zero, 8, e64, m8, ta, ma
 ; RV64-NEXT:    vslidedown.vi v24, v16, 8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 44
+; RV64-NEXT:    li a2, 19
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v4
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    add a1, a2, a1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vs1r.v v4, (a1) # Unknown-size Folded Spill
 ; RV64-NEXT:    vrgather.vi v8, v24, 2, v0.t
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 40
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.v v20, v8
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    li a1, 6
 ; RV64-NEXT:    vid.v v8
-; RV64-NEXT:    vmul.vx v24, v8, a1
+; RV64-NEXT:    vmul.vx v2, v8, a1
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
+; RV64-NEXT:    li a2, 43
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v8, v24, v2
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; RV64-NEXT:    li a1, 56
-; RV64-NEXT:    vmv.s.x v0, a1
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 20
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v8, v24
-; RV64-NEXT:    vadd.vi v8, v24, -16
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vmv.s.x v1, a1
+; RV64-NEXT:    vadd.vi v16, v2, -16
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 60
+; RV64-NEXT:    li a2, 35
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v8, v24, v0.t
+; RV64-NEXT:    vrgatherei16.vv v8, v24, v16, v0.t
 ; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vmv.v.v v20, v8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 40
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vmv.v.v v8, v16
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 40
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a2, a1, 4
+; RV64-NEXT:    sub a1, a2, a1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vs4r.v v20, (a1) # Unknown-size Folded Spill
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 52
+; RV64-NEXT:    li a2, 27
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    vrgather.vi v8, v16, 5
+; RV64-NEXT:    vmv1r.v v0, v4
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 44
+; RV64-NEXT:    li a2, 19
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    vrgather.vi v8, v16, 3, v0.t
+; RV64-NEXT:    vmv.v.v v4, v8
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vs2r.v v2, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vadd.vi v24, v2, 1
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v0, v16, 1
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v8, v24, v0
-; RV64-NEXT:    vadd.vi v24, v16, -15
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 20
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 60
+; RV64-NEXT:    li a2, 43
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vmv.v.v v12, v8
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v24
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT:    vadd.vi v24, v2, -15
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
+; RV64-NEXT:    li a2, 11
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v8, v0, 2
+; RV64-NEXT:    vs2r.v v24, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
+; RV64-NEXT:    li a2, 35
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v24, v8
-; RV64-NEXT:    li a1, 24
-; RV64-NEXT:    vmv.s.x v24, a1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 12
+; RV64-NEXT:    li a2, 11
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs1r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vadd.vi v8, v0, -14
-; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    vl2r.v v2, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v8, v24, v2, v0.t
+; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vmv.v.v v4, v8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 60
+; RV64-NEXT:    li a2, 11
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v24, v8, v0.t
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vmv.v.i v8, 6
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 4
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vmv.s.x v12, zero
-; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vs4r.v v4, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    addi a1, sp, 16
-; RV64-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vslideup.vi v8, v12, 5
+; RV64-NEXT:    vl2r.v v2, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vadd.vi v4, v2, 2
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v4
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT:    li a1, 24
+; RV64-NEXT:    vmv.s.x v4, a1
+; RV64-NEXT:    vadd.vi v16, v2, -14
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v4
+; RV64-NEXT:    vrgatherei16.vv v8, v24, v16, v0.t
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.i v12, 6
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 52
+; RV64-NEXT:    li a2, 27
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v12, v24, v8
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vmv4r.v v24, v16
+; RV64-NEXT:    vrgatherei16.vv v16, v24, v12
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    add a1, a2, a1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vl1r.v v1, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vmv1r.v v0, v1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 44
+; RV64-NEXT:    li a2, 19
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vi v12, v24, 4, v0.t
+; RV64-NEXT:    vrgather.vi v16, v24, 4, v0.t
 ; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT:    vmv.v.v v12, v16
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 20
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v0, v8, 3
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v24, v0
-; RV64-NEXT:    vadd.vi v8, v8, -13
+; RV64-NEXT:    vmv.v.v v16, v8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 12
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    sub a1, a2, a1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vs4r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    vadd.vi v28, v2, 3
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 60
+; RV64-NEXT:    li a2, 43
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v24, v8, v0.t
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vmv.v.i v12, 7
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v28
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT:    vadd.vi v28, v2, -13
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v4
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 12
+; RV64-NEXT:    li a2, 35
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 8, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.i v4, 1
-; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT:    vslideup.vi v12, v4, 5
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
+; RV64-NEXT:    lui a1, 16
+; RV64-NEXT:    addiw a1, a1, 7
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v12, a1
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 52
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v24, v12
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
+; RV64-NEXT:    li a2, 27
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v24, v16, v12
+; RV64-NEXT:    vmv1r.v v0, v1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 44
+; RV64-NEXT:    li a2, 19
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vi v16, v8, 5, v0.t
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgather.vi v24, v16, 5, v0.t
 ; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT:    vmv.v.v v24, v8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vmv.v.v v16, v8
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    add a1, a2, a1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vs4r.v v24, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    lui a1, 96
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a1
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    li a1, 192
+; RV64-NEXT:    vmv.s.x v0, a1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a1, a1, 1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v8, v8, 4
+; RV64-NEXT:    vs1r.v v0, (a1) # Unknown-size Folded Spill
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
+; RV64-NEXT:    li a2, 27
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v16, v24, v8
+; RV64-NEXT:    vrgather.vi v4, v24, 2
+; RV64-NEXT:    vrgatherei16.vv v4, v16, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    vadd.vi v26, v2, 4
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
+; RV64-NEXT:    li a2, 43
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v26
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; RV64-NEXT:    li a1, 28
 ; RV64-NEXT:    vmv.s.x v1, a1
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v16, v8, -12
+; RV64-NEXT:    vadd.vi v16, v2, -12
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vmv1r.v v0, v1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 60
+; RV64-NEXT:    li a2, 35
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v8, v24, v16, v0.t
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
-; RV64-NEXT:    addi a1, sp, 16
-; RV64-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 4
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl4r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vslideup.vi v24, v8, 6
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    li a1, 192
-; RV64-NEXT:    vmv.s.x v2, a1
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 52
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vi v16, v8, 2
-; RV64-NEXT:    vmv1r.v v0, v2
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 44
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vmv.v.v v12, v16
-; RV64-NEXT:    vrgather.vv v12, v8, v24, v0.t
+; RV64-NEXT:    vrgatherei16.vv v8, v24, v16, v0.t
 ; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT:    vmv.v.v v4, v8
+; RV64-NEXT:    lui a1, 112
+; RV64-NEXT:    addiw a1, a1, 1
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v12, a1
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
+; RV64-NEXT:    li a2, 27
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vmv.v.v v12, v16
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 24
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vrgather.vi v8, v16, 3
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a1, a1, 1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v16, v8, 5
+; RV64-NEXT:    vl1r.v v0, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 68
+; RV64-NEXT:    li a2, 19
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v24, v8, v16
+; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v12, v0.t
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    vadd.vi v12, v2, 5
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
+; RV64-NEXT:    li a2, 43
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vadd.vi v16, v8, -11
+; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v16, v24, v12
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT:    vadd.vi v12, v2, -11
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vmv1r.v v0, v1
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 60
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v24, v8, v16, v0.t
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 12
+; RV64-NEXT:    li a2, 35
 ; RV64-NEXT:    mul a1, a1, a2
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl4r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vslideup.vi v24, v4, 6
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 52
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vi v12, v8, 3
-; RV64-NEXT:    vmv1r.v v0, v2
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 44
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vrgather.vv v12, v16, v24, v0.t
+; RV64-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vrgatherei16.vv v16, v24, v12, v0.t
 ; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 76
-; RV64-NEXT:    mul a1, a1, a2
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 16
-; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT:    vmv.v.v v12, v16
+; RV64-NEXT:    vmv.v.v v8, v16
 ; RV64-NEXT:    addi a1, a0, 320
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vse64.v v12, (a1)
-; RV64-NEXT:    addi a1, a0, 256
-; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    li a3, 24
-; RV64-NEXT:    mul a2, a2, a3
-; RV64-NEXT:    add a2, sp, a2
-; RV64-NEXT:    addi a2, a2, 16
-; RV64-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV64-NEXT:    vse64.v v8, (a1)
+; RV64-NEXT:    addi a1, a0, 256
+; RV64-NEXT:    vse64.v v4, (a1)
 ; RV64-NEXT:    addi a1, a0, 192
 ; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    slli a2, a2, 2
+; RV64-NEXT:    slli a3, a2, 1
+; RV64-NEXT:    add a2, a3, a2
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 16
 ; RV64-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV64-NEXT:    vse64.v v8, (a1)
 ; RV64-NEXT:    addi a1, a0, 128
 ; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    li a3, 20
-; RV64-NEXT:    mul a2, a2, a3
+; RV64-NEXT:    slli a3, a2, 3
+; RV64-NEXT:    sub a2, a3, a2
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 16
 ; RV64-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV64-NEXT:    vse64.v v8, (a1)
 ; RV64-NEXT:    addi a1, a0, 64
 ; RV64-NEXT:    csrr a2, vlenb
-; RV64-NEXT:    slli a2, a2, 5
+; RV64-NEXT:    li a3, 11
+; RV64-NEXT:    mul a2, a2, a3
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 16
 ; RV64-NEXT:    vl4r.v v8, (a2) # Unknown-size Folded Reload
 ; RV64-NEXT:    vse64.v v8, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    li a2, 40
-; RV64-NEXT:    mul a1, a1, a2
+; RV64-NEXT:    slli a2, a1, 4
+; RV64-NEXT:    sub a1, a2, a1
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vl4r.v v8, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    li a1, 84
+; RV64-NEXT:    li a1, 52
 ; RV64-NEXT:    mul a0, a0, a1
 ; RV64-NEXT:    add sp, sp, a0
 ; RV64-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index d9e5fd85e73f687..45f59d3a28a7c1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -377,10 +377,11 @@ define <4 x i32> @reverse_v4i32(<4 x i32> %a) {
 define <8 x i32> @reverse_v8i32(<8 x i32> %a) {
 ; CHECK-LABEL: reverse_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    vrsub.vi v12, v10, 7
-; CHECK-NEXT:    vrgather.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %res = call <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32> %a)
@@ -390,10 +391,11 @@ define <8 x i32> @reverse_v8i32(<8 x i32> %a) {
 define <16 x i32> @reverse_v16i32(<16 x i32> %a) {
 ; CHECK-LABEL: reverse_v16i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vrsub.vi v16, v12, 15
-; CHECK-NEXT:    vrgather.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %res = call <16 x i32> @llvm.experimental.vector.reverse.v16i32(<16 x i32> %a)
@@ -421,161 +423,29 @@ define <2 x i64> @reverse_v2i64(<2 x i64> %a) {
 }
 
 define <4 x i64> @reverse_v4i64(<4 x i64> %a) {
-; RV32-BITS-UNKNOWN-LABEL: reverse_v4i64:
-; RV32-BITS-UNKNOWN:       # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vid.v v10
-; RV32-BITS-UNKNOWN-NEXT:    vrsub.vi v12, v10, 3
-; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
-; RV32-BITS-UNKNOWN-NEXT:    ret
-;
-; RV32-BITS-256-LABEL: reverse_v4i64:
-; RV32-BITS-256:       # %bb.0:
-; RV32-BITS-256-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-BITS-256-NEXT:    vid.v v10
-; RV32-BITS-256-NEXT:    vrsub.vi v12, v10, 3
-; RV32-BITS-256-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-BITS-256-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-BITS-256-NEXT:    vmv.v.v v8, v10
-; RV32-BITS-256-NEXT:    ret
-;
-; RV32-BITS-512-LABEL: reverse_v4i64:
-; RV32-BITS-512:       # %bb.0:
-; RV32-BITS-512-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-BITS-512-NEXT:    vid.v v10
-; RV32-BITS-512-NEXT:    vrsub.vi v12, v10, 3
-; RV32-BITS-512-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-BITS-512-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-BITS-512-NEXT:    vmv.v.v v8, v10
-; RV32-BITS-512-NEXT:    ret
-;
-; RV64-BITS-UNKNOWN-LABEL: reverse_v4i64:
-; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-BITS-UNKNOWN-NEXT:    vid.v v10
-; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v12, v10, 3
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v10, v8, v12
-; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
-; RV64-BITS-UNKNOWN-NEXT:    ret
-;
-; RV64-BITS-256-LABEL: reverse_v4i64:
-; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-BITS-256-NEXT:    vid.v v10
-; RV64-BITS-256-NEXT:    vrsub.vi v12, v10, 3
-; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v12
-; RV64-BITS-256-NEXT:    vmv.v.v v8, v10
-; RV64-BITS-256-NEXT:    ret
-;
-; RV64-BITS-512-LABEL: reverse_v4i64:
-; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-BITS-512-NEXT:    vid.v v10
-; RV64-BITS-512-NEXT:    vrsub.vi v12, v10, 3
-; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v12
-; RV64-BITS-512-NEXT:    vmv.v.v v8, v10
-; RV64-BITS-512-NEXT:    ret
-;
-; RV32-ZVBB-LABEL: reverse_v4i64:
-; RV32-ZVBB:       # %bb.0:
-; RV32-ZVBB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-ZVBB-NEXT:    vid.v v10
-; RV32-ZVBB-NEXT:    vrsub.vi v12, v10, 3
-; RV32-ZVBB-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-ZVBB-NEXT:    vmv.v.v v8, v10
-; RV32-ZVBB-NEXT:    ret
-;
-; RV64-ZVBB-LABEL: reverse_v4i64:
-; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-ZVBB-NEXT:    vid.v v10
-; RV64-ZVBB-NEXT:    vrsub.vi v12, v10, 3
-; RV64-ZVBB-NEXT:    vrgather.vv v10, v8, v12
-; RV64-ZVBB-NEXT:    vmv.v.v v8, v10
-; RV64-ZVBB-NEXT:    ret
+; CHECK-LABEL: reverse_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v12, v10, 3
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %res = call <4 x i64> @llvm.experimental.vector.reverse.v4i64(<4 x i64> %a)
   ret <4 x i64> %res
 }
 
 define <8 x i64> @reverse_v8i64(<8 x i64> %a) {
-; RV32-BITS-UNKNOWN-LABEL: reverse_v8i64:
-; RV32-BITS-UNKNOWN:       # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vid.v v12
-; RV32-BITS-UNKNOWN-NEXT:    vrsub.vi v16, v12, 7
-; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
-; RV32-BITS-UNKNOWN-NEXT:    ret
-;
-; RV32-BITS-256-LABEL: reverse_v8i64:
-; RV32-BITS-256:       # %bb.0:
-; RV32-BITS-256-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-BITS-256-NEXT:    vid.v v12
-; RV32-BITS-256-NEXT:    vrsub.vi v16, v12, 7
-; RV32-BITS-256-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-BITS-256-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-BITS-256-NEXT:    vmv.v.v v8, v12
-; RV32-BITS-256-NEXT:    ret
-;
-; RV32-BITS-512-LABEL: reverse_v8i64:
-; RV32-BITS-512:       # %bb.0:
-; RV32-BITS-512-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-BITS-512-NEXT:    vid.v v12
-; RV32-BITS-512-NEXT:    vrsub.vi v16, v12, 7
-; RV32-BITS-512-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-BITS-512-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-BITS-512-NEXT:    vmv.v.v v8, v12
-; RV32-BITS-512-NEXT:    ret
-;
-; RV64-BITS-UNKNOWN-LABEL: reverse_v8i64:
-; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT:    vid.v v12
-; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v16, v12, 7
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v12, v8, v16
-; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
-; RV64-BITS-UNKNOWN-NEXT:    ret
-;
-; RV64-BITS-256-LABEL: reverse_v8i64:
-; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-BITS-256-NEXT:    vid.v v12
-; RV64-BITS-256-NEXT:    vrsub.vi v16, v12, 7
-; RV64-BITS-256-NEXT:    vrgather.vv v12, v8, v16
-; RV64-BITS-256-NEXT:    vmv.v.v v8, v12
-; RV64-BITS-256-NEXT:    ret
-;
-; RV64-BITS-512-LABEL: reverse_v8i64:
-; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-BITS-512-NEXT:    vid.v v12
-; RV64-BITS-512-NEXT:    vrsub.vi v16, v12, 7
-; RV64-BITS-512-NEXT:    vrgather.vv v12, v8, v16
-; RV64-BITS-512-NEXT:    vmv.v.v v8, v12
-; RV64-BITS-512-NEXT:    ret
-;
-; RV32-ZVBB-LABEL: reverse_v8i64:
-; RV32-ZVBB:       # %bb.0:
-; RV32-ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-ZVBB-NEXT:    vid.v v12
-; RV32-ZVBB-NEXT:    vrsub.vi v16, v12, 7
-; RV32-ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-ZVBB-NEXT:    vmv.v.v v8, v12
-; RV32-ZVBB-NEXT:    ret
-;
-; RV64-ZVBB-LABEL: reverse_v8i64:
-; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-ZVBB-NEXT:    vid.v v12
-; RV64-ZVBB-NEXT:    vrsub.vi v16, v12, 7
-; RV64-ZVBB-NEXT:    vrgather.vv v12, v8, v16
-; RV64-ZVBB-NEXT:    vmv.v.v v8, v12
-; RV64-ZVBB-NEXT:    ret
+; CHECK-LABEL: reverse_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    vrsub.vi v16, v12, 7
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %res = call <8 x i64> @llvm.experimental.vector.reverse.v8i64(<8 x i64> %a)
   ret <8 x i64> %res
 }
@@ -703,10 +573,11 @@ define <4 x float> @reverse_v4f32(<4 x float> %a) {
 define <8 x float> @reverse_v8f32(<8 x float> %a) {
 ; CHECK-LABEL: reverse_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    vrsub.vi v12, v10, 7
-; CHECK-NEXT:    vrgather.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %res = call <8 x float> @llvm.experimental.vector.reverse.v8f32(<8 x float> %a)
@@ -716,10 +587,11 @@ define <8 x float> @reverse_v8f32(<8 x float> %a) {
 define <16 x float> @reverse_v16f32(<16 x float> %a) {
 ; CHECK-LABEL: reverse_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vrsub.vi v16, v12, 15
-; CHECK-NEXT:    vrgather.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %res = call <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float> %a)
@@ -747,161 +619,29 @@ define <2 x double> @reverse_v2f64(<2 x double> %a) {
 }
 
 define <4 x double> @reverse_v4f64(<4 x double> %a) {
-; RV32-BITS-UNKNOWN-LABEL: reverse_v4f64:
-; RV32-BITS-UNKNOWN:       # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vid.v v10
-; RV32-BITS-UNKNOWN-NEXT:    vrsub.vi v12, v10, 3
-; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
-; RV32-BITS-UNKNOWN-NEXT:    ret
-;
-; RV32-BITS-256-LABEL: reverse_v4f64:
-; RV32-BITS-256:       # %bb.0:
-; RV32-BITS-256-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-BITS-256-NEXT:    vid.v v10
-; RV32-BITS-256-NEXT:    vrsub.vi v12, v10, 3
-; RV32-BITS-256-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-BITS-256-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-BITS-256-NEXT:    vmv.v.v v8, v10
-; RV32-BITS-256-NEXT:    ret
-;
-; RV32-BITS-512-LABEL: reverse_v4f64:
-; RV32-BITS-512:       # %bb.0:
-; RV32-BITS-512-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-BITS-512-NEXT:    vid.v v10
-; RV32-BITS-512-NEXT:    vrsub.vi v12, v10, 3
-; RV32-BITS-512-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-BITS-512-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-BITS-512-NEXT:    vmv.v.v v8, v10
-; RV32-BITS-512-NEXT:    ret
-;
-; RV64-BITS-UNKNOWN-LABEL: reverse_v4f64:
-; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-BITS-UNKNOWN-NEXT:    vid.v v10
-; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v12, v10, 3
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v10, v8, v12
-; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
-; RV64-BITS-UNKNOWN-NEXT:    ret
-;
-; RV64-BITS-256-LABEL: reverse_v4f64:
-; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-BITS-256-NEXT:    vid.v v10
-; RV64-BITS-256-NEXT:    vrsub.vi v12, v10, 3
-; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v12
-; RV64-BITS-256-NEXT:    vmv.v.v v8, v10
-; RV64-BITS-256-NEXT:    ret
-;
-; RV64-BITS-512-LABEL: reverse_v4f64:
-; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-BITS-512-NEXT:    vid.v v10
-; RV64-BITS-512-NEXT:    vrsub.vi v12, v10, 3
-; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v12
-; RV64-BITS-512-NEXT:    vmv.v.v v8, v10
-; RV64-BITS-512-NEXT:    ret
-;
-; RV32-ZVBB-LABEL: reverse_v4f64:
-; RV32-ZVBB:       # %bb.0:
-; RV32-ZVBB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-ZVBB-NEXT:    vid.v v10
-; RV32-ZVBB-NEXT:    vrsub.vi v12, v10, 3
-; RV32-ZVBB-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-ZVBB-NEXT:    vmv.v.v v8, v10
-; RV32-ZVBB-NEXT:    ret
-;
-; RV64-ZVBB-LABEL: reverse_v4f64:
-; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-ZVBB-NEXT:    vid.v v10
-; RV64-ZVBB-NEXT:    vrsub.vi v12, v10, 3
-; RV64-ZVBB-NEXT:    vrgather.vv v10, v8, v12
-; RV64-ZVBB-NEXT:    vmv.v.v v8, v10
-; RV64-ZVBB-NEXT:    ret
+; CHECK-LABEL: reverse_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v12, v10, 3
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %res = call <4 x double> @llvm.experimental.vector.reverse.v4f64(<4 x double> %a)
   ret <4 x double> %res
 }
 
 define <8 x double> @reverse_v8f64(<8 x double> %a) {
-; RV32-BITS-UNKNOWN-LABEL: reverse_v8f64:
-; RV32-BITS-UNKNOWN:       # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vid.v v12
-; RV32-BITS-UNKNOWN-NEXT:    vrsub.vi v16, v12, 7
-; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
-; RV32-BITS-UNKNOWN-NEXT:    ret
-;
-; RV32-BITS-256-LABEL: reverse_v8f64:
-; RV32-BITS-256:       # %bb.0:
-; RV32-BITS-256-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-BITS-256-NEXT:    vid.v v12
-; RV32-BITS-256-NEXT:    vrsub.vi v16, v12, 7
-; RV32-BITS-256-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-BITS-256-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-BITS-256-NEXT:    vmv.v.v v8, v12
-; RV32-BITS-256-NEXT:    ret
-;
-; RV32-BITS-512-LABEL: reverse_v8f64:
-; RV32-BITS-512:       # %bb.0:
-; RV32-BITS-512-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-BITS-512-NEXT:    vid.v v12
-; RV32-BITS-512-NEXT:    vrsub.vi v16, v12, 7
-; RV32-BITS-512-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-BITS-512-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-BITS-512-NEXT:    vmv.v.v v8, v12
-; RV32-BITS-512-NEXT:    ret
-;
-; RV64-BITS-UNKNOWN-LABEL: reverse_v8f64:
-; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT:    vid.v v12
-; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v16, v12, 7
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v12, v8, v16
-; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
-; RV64-BITS-UNKNOWN-NEXT:    ret
-;
-; RV64-BITS-256-LABEL: reverse_v8f64:
-; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-BITS-256-NEXT:    vid.v v12
-; RV64-BITS-256-NEXT:    vrsub.vi v16, v12, 7
-; RV64-BITS-256-NEXT:    vrgather.vv v12, v8, v16
-; RV64-BITS-256-NEXT:    vmv.v.v v8, v12
-; RV64-BITS-256-NEXT:    ret
-;
-; RV64-BITS-512-LABEL: reverse_v8f64:
-; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-BITS-512-NEXT:    vid.v v12
-; RV64-BITS-512-NEXT:    vrsub.vi v16, v12, 7
-; RV64-BITS-512-NEXT:    vrgather.vv v12, v8, v16
-; RV64-BITS-512-NEXT:    vmv.v.v v8, v12
-; RV64-BITS-512-NEXT:    ret
-;
-; RV32-ZVBB-LABEL: reverse_v8f64:
-; RV32-ZVBB:       # %bb.0:
-; RV32-ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-ZVBB-NEXT:    vid.v v12
-; RV32-ZVBB-NEXT:    vrsub.vi v16, v12, 7
-; RV32-ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
-; RV32-ZVBB-NEXT:    vmv.v.v v8, v12
-; RV32-ZVBB-NEXT:    ret
-;
-; RV64-ZVBB-LABEL: reverse_v8f64:
-; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-ZVBB-NEXT:    vid.v v12
-; RV64-ZVBB-NEXT:    vrsub.vi v16, v12, 7
-; RV64-ZVBB-NEXT:    vrgather.vv v12, v8, v16
-; RV64-ZVBB-NEXT:    vmv.v.v v8, v12
-; RV64-ZVBB-NEXT:    ret
+; CHECK-LABEL: reverse_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    vrsub.vi v16, v12, 7
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %res = call <8 x double> @llvm.experimental.vector.reverse.v8f64(<8 x double> %a)
   ret <8 x double> %res
 }
@@ -913,8 +653,8 @@ define <3 x i64> @reverse_v3i64(<3 x i64> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    lui a0, %hi(.LCPI44_0)
 ; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, %lo(.LCPI44_0)
 ; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vle32.v v12, (a0)
-; RV32-BITS-UNKNOWN-NEXT:    vrgather.vv v10, v8, v12
+; RV32-BITS-UNKNOWN-NEXT:    vle16.v v12, (a0)
+; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
 ; RV32-BITS-UNKNOWN-NEXT:    ret
 ;
@@ -923,8 +663,8 @@ define <3 x i64> @reverse_v3i64(<3 x i64> %a) {
 ; RV32-BITS-256-NEXT:    lui a0, %hi(.LCPI44_0)
 ; RV32-BITS-256-NEXT:    addi a0, a0, %lo(.LCPI44_0)
 ; RV32-BITS-256-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-BITS-256-NEXT:    vle32.v v12, (a0)
-; RV32-BITS-256-NEXT:    vrgather.vv v10, v8, v12
+; RV32-BITS-256-NEXT:    vle16.v v12, (a0)
+; RV32-BITS-256-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV32-BITS-256-NEXT:    vmv.v.v v8, v10
 ; RV32-BITS-256-NEXT:    ret
 ;
@@ -933,35 +673,38 @@ define <3 x i64> @reverse_v3i64(<3 x i64> %a) {
 ; RV32-BITS-512-NEXT:    lui a0, %hi(.LCPI44_0)
 ; RV32-BITS-512-NEXT:    addi a0, a0, %lo(.LCPI44_0)
 ; RV32-BITS-512-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-BITS-512-NEXT:    vle32.v v12, (a0)
-; RV32-BITS-512-NEXT:    vrgather.vv v10, v8, v12
+; RV32-BITS-512-NEXT:    vle16.v v12, (a0)
+; RV32-BITS-512-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV32-BITS-512-NEXT:    vmv.v.v v8, v10
 ; RV32-BITS-512-NEXT:    ret
 ;
 ; RV64-BITS-UNKNOWN-LABEL: reverse_v3i64:
 ; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-BITS-UNKNOWN-NEXT:    vid.v v10
 ; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v12, v10, 2
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v10, v8, v12
+; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
 ; RV64-BITS-UNKNOWN-NEXT:    ret
 ;
 ; RV64-BITS-256-LABEL: reverse_v3i64:
 ; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-BITS-256-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-BITS-256-NEXT:    vid.v v10
 ; RV64-BITS-256-NEXT:    vrsub.vi v12, v10, 2
-; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v12
+; RV64-BITS-256-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-BITS-256-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-BITS-256-NEXT:    vmv.v.v v8, v10
 ; RV64-BITS-256-NEXT:    ret
 ;
 ; RV64-BITS-512-LABEL: reverse_v3i64:
 ; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-BITS-512-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-BITS-512-NEXT:    vid.v v10
 ; RV64-BITS-512-NEXT:    vrsub.vi v12, v10, 2
-; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v12
+; RV64-BITS-512-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-BITS-512-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-BITS-512-NEXT:    vmv.v.v v8, v10
 ; RV64-BITS-512-NEXT:    ret
 ;
@@ -970,17 +713,18 @@ define <3 x i64> @reverse_v3i64(<3 x i64> %a) {
 ; RV32-ZVBB-NEXT:    lui a0, %hi(.LCPI44_0)
 ; RV32-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI44_0)
 ; RV32-ZVBB-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-ZVBB-NEXT:    vle32.v v12, (a0)
-; RV32-ZVBB-NEXT:    vrgather.vv v10, v8, v12
+; RV32-ZVBB-NEXT:    vle16.v v12, (a0)
+; RV32-ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV32-ZVBB-NEXT:    vmv.v.v v8, v10
 ; RV32-ZVBB-NEXT:    ret
 ;
 ; RV64-ZVBB-LABEL: reverse_v3i64:
 ; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-ZVBB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-ZVBB-NEXT:    vid.v v10
 ; RV64-ZVBB-NEXT:    vrsub.vi v12, v10, 2
-; RV64-ZVBB-NEXT:    vrgather.vv v10, v8, v12
+; RV64-ZVBB-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-ZVBB-NEXT:    vmv.v.v v8, v10
 ; RV64-ZVBB-NEXT:    ret
   %res = call <3 x i64> @llvm.experimental.vector.reverse.v3i64(<3 x i64> %a)
@@ -993,8 +737,8 @@ define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    lui a0, %hi(.LCPI45_0)
 ; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, %lo(.LCPI45_0)
 ; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vle32.v v16, (a0)
-; RV32-BITS-UNKNOWN-NEXT:    vrgather.vv v12, v8, v16
+; RV32-BITS-UNKNOWN-NEXT:    vle16.v v16, (a0)
+; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
 ; RV32-BITS-UNKNOWN-NEXT:    ret
 ;
@@ -1003,8 +747,8 @@ define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
 ; RV32-BITS-256-NEXT:    lui a0, %hi(.LCPI45_0)
 ; RV32-BITS-256-NEXT:    addi a0, a0, %lo(.LCPI45_0)
 ; RV32-BITS-256-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-BITS-256-NEXT:    vle32.v v16, (a0)
-; RV32-BITS-256-NEXT:    vrgather.vv v12, v8, v16
+; RV32-BITS-256-NEXT:    vle16.v v16, (a0)
+; RV32-BITS-256-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-BITS-256-NEXT:    vmv.v.v v8, v12
 ; RV32-BITS-256-NEXT:    ret
 ;
@@ -1013,35 +757,38 @@ define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
 ; RV32-BITS-512-NEXT:    lui a0, %hi(.LCPI45_0)
 ; RV32-BITS-512-NEXT:    addi a0, a0, %lo(.LCPI45_0)
 ; RV32-BITS-512-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-BITS-512-NEXT:    vle32.v v16, (a0)
-; RV32-BITS-512-NEXT:    vrgather.vv v12, v8, v16
+; RV32-BITS-512-NEXT:    vle16.v v16, (a0)
+; RV32-BITS-512-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-BITS-512-NEXT:    vmv.v.v v8, v12
 ; RV32-BITS-512-NEXT:    ret
 ;
 ; RV64-BITS-UNKNOWN-LABEL: reverse_v6i64:
 ; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64-BITS-UNKNOWN-NEXT:    vid.v v12
 ; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v16, v12, 5
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v12, v8, v16
+; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
 ; RV64-BITS-UNKNOWN-NEXT:    ret
 ;
 ; RV64-BITS-256-LABEL: reverse_v6i64:
 ; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-BITS-256-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64-BITS-256-NEXT:    vid.v v12
 ; RV64-BITS-256-NEXT:    vrsub.vi v16, v12, 5
-; RV64-BITS-256-NEXT:    vrgather.vv v12, v8, v16
+; RV64-BITS-256-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-BITS-256-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV64-BITS-256-NEXT:    vmv.v.v v8, v12
 ; RV64-BITS-256-NEXT:    ret
 ;
 ; RV64-BITS-512-LABEL: reverse_v6i64:
 ; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-BITS-512-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64-BITS-512-NEXT:    vid.v v12
 ; RV64-BITS-512-NEXT:    vrsub.vi v16, v12, 5
-; RV64-BITS-512-NEXT:    vrgather.vv v12, v8, v16
+; RV64-BITS-512-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-BITS-512-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV64-BITS-512-NEXT:    vmv.v.v v8, v12
 ; RV64-BITS-512-NEXT:    ret
 ;
@@ -1050,17 +797,18 @@ define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
 ; RV32-ZVBB-NEXT:    lui a0, %hi(.LCPI45_0)
 ; RV32-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI45_0)
 ; RV32-ZVBB-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-ZVBB-NEXT:    vle32.v v16, (a0)
-; RV32-ZVBB-NEXT:    vrgather.vv v12, v8, v16
+; RV32-ZVBB-NEXT:    vle16.v v16, (a0)
+; RV32-ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-ZVBB-NEXT:    vmv.v.v v8, v12
 ; RV32-ZVBB-NEXT:    ret
 ;
 ; RV64-ZVBB-LABEL: reverse_v6i64:
 ; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64-ZVBB-NEXT:    vid.v v12
 ; RV64-ZVBB-NEXT:    vrsub.vi v16, v12, 5
-; RV64-ZVBB-NEXT:    vrgather.vv v12, v8, v16
+; RV64-ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV64-ZVBB-NEXT:    vmv.v.v v8, v12
 ; RV64-ZVBB-NEXT:    ret
   %res = call <6 x i64> @llvm.experimental.vector.reverse.v6i64(<6 x i64> %a)
@@ -1074,8 +822,8 @@ define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, %lo(.LCPI46_0)
 ; RV32-BITS-UNKNOWN-NEXT:    li a1, 32
 ; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; RV32-BITS-UNKNOWN-NEXT:    vle32.v v24, (a0)
-; RV32-BITS-UNKNOWN-NEXT:    vrgather.vv v16, v8, v24
+; RV32-BITS-UNKNOWN-NEXT:    vle16.v v24, (a0)
+; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v16
 ; RV32-BITS-UNKNOWN-NEXT:    ret
 ;
@@ -1085,8 +833,8 @@ define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
 ; RV32-BITS-256-NEXT:    addi a0, a0, %lo(.LCPI46_0)
 ; RV32-BITS-256-NEXT:    li a1, 32
 ; RV32-BITS-256-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; RV32-BITS-256-NEXT:    vle32.v v24, (a0)
-; RV32-BITS-256-NEXT:    vrgather.vv v16, v8, v24
+; RV32-BITS-256-NEXT:    vle16.v v24, (a0)
+; RV32-BITS-256-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV32-BITS-256-NEXT:    vmv.v.v v8, v16
 ; RV32-BITS-256-NEXT:    ret
 ;
@@ -1096,35 +844,38 @@ define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
 ; RV32-BITS-512-NEXT:    addi a0, a0, %lo(.LCPI46_0)
 ; RV32-BITS-512-NEXT:    li a1, 32
 ; RV32-BITS-512-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; RV32-BITS-512-NEXT:    vle32.v v24, (a0)
-; RV32-BITS-512-NEXT:    vrgather.vv v16, v8, v24
+; RV32-BITS-512-NEXT:    vle16.v v24, (a0)
+; RV32-BITS-512-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV32-BITS-512-NEXT:    vmv.v.v v8, v16
 ; RV32-BITS-512-NEXT:    ret
 ;
 ; RV64-BITS-UNKNOWN-LABEL: reverse_v12i64:
 ; RV64-BITS-UNKNOWN:       # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-BITS-UNKNOWN-NEXT:    vid.v v16
 ; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v24, v16, 11
-; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v16, v8, v24
+; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v16
 ; RV64-BITS-UNKNOWN-NEXT:    ret
 ;
 ; RV64-BITS-256-LABEL: reverse_v12i64:
 ; RV64-BITS-256:       # %bb.0:
-; RV64-BITS-256-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-BITS-256-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-BITS-256-NEXT:    vid.v v16
 ; RV64-BITS-256-NEXT:    vrsub.vi v24, v16, 11
-; RV64-BITS-256-NEXT:    vrgather.vv v16, v8, v24
+; RV64-BITS-256-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-BITS-256-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV64-BITS-256-NEXT:    vmv.v.v v8, v16
 ; RV64-BITS-256-NEXT:    ret
 ;
 ; RV64-BITS-512-LABEL: reverse_v12i64:
 ; RV64-BITS-512:       # %bb.0:
-; RV64-BITS-512-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-BITS-512-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-BITS-512-NEXT:    vid.v v16
 ; RV64-BITS-512-NEXT:    vrsub.vi v24, v16, 11
-; RV64-BITS-512-NEXT:    vrgather.vv v16, v8, v24
+; RV64-BITS-512-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-BITS-512-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV64-BITS-512-NEXT:    vmv.v.v v8, v16
 ; RV64-BITS-512-NEXT:    ret
 ;
@@ -1134,17 +885,18 @@ define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
 ; RV32-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI46_0)
 ; RV32-ZVBB-NEXT:    li a1, 32
 ; RV32-ZVBB-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; RV32-ZVBB-NEXT:    vle32.v v24, (a0)
-; RV32-ZVBB-NEXT:    vrgather.vv v16, v8, v24
+; RV32-ZVBB-NEXT:    vle16.v v24, (a0)
+; RV32-ZVBB-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV32-ZVBB-NEXT:    vmv.v.v v8, v16
 ; RV32-ZVBB-NEXT:    ret
 ;
 ; RV64-ZVBB-LABEL: reverse_v12i64:
 ; RV64-ZVBB:       # %bb.0:
-; RV64-ZVBB-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-ZVBB-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-ZVBB-NEXT:    vid.v v16
 ; RV64-ZVBB-NEXT:    vrsub.vi v24, v16, 11
-; RV64-ZVBB-NEXT:    vrgather.vv v16, v8, v24
+; RV64-ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-ZVBB-NEXT:    vrgatherei16.vv v16, v8, v24
 ; RV64-ZVBB-NEXT:    vmv.v.v v8, v16
 ; RV64-ZVBB-NEXT:    ret
   %res = call <12 x i64> @llvm.experimental.vector.reverse.v12i64(<12 x i64> %a)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
index 4059dcbd178b979..05121b4a18200f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
@@ -640,8 +640,8 @@ define <8 x i32> @shuffle_v8i32_as_i64(<8 x i32> %v) {
 ; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI22_0)
 ; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI22_0)
 ; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e32, m8, ta, ma
-; ZVBB-ZVE32X-NEXT:    vle32.v v24, (a0)
-; ZVBB-ZVE32X-NEXT:    vrgather.vv v16, v8, v24
+; ZVBB-ZVE32X-NEXT:    vle16.v v24, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgatherei16.vv v16, v8, v24
 ; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v16
 ; ZVBB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -836,8 +836,8 @@ define <8 x float> @shuffle_v8f32_as_i64(<8 x float> %v) {
 ; ZVBB-ZVE32X-NEXT:    lui a0, %hi(.LCPI27_0)
 ; ZVBB-ZVE32X-NEXT:    addi a0, a0, %lo(.LCPI27_0)
 ; ZVBB-ZVE32X-NEXT:    vsetivli zero, 8, e32, m8, ta, ma
-; ZVBB-ZVE32X-NEXT:    vle32.v v24, (a0)
-; ZVBB-ZVE32X-NEXT:    vrgather.vv v16, v8, v24
+; ZVBB-ZVE32X-NEXT:    vle16.v v24, (a0)
+; ZVBB-ZVE32X-NEXT:    vrgatherei16.vv v16, v8, v24
 ; ZVBB-ZVE32X-NEXT:    vmv.v.v v8, v16
 ; ZVBB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
diff --git a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
index 70b4ab2c63c100d..b26b4755514064f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
@@ -335,15 +335,16 @@ define <8 x i32> @v4i32_2(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: v4i32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT:    vid.v v14
-; CHECK-NEXT:    vrsub.vi v16, v14, 7
-; CHECK-NEXT:    vrgather.vv v10, v8, v16
-; CHECK-NEXT:    vrsub.vi v8, v14, 3
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vrsub.vi v13, v9, 7
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v13
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrsub.vi v8, v9, 3
 ; CHECK-NEXT:    vmv.v.i v0, 15
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vrgather.vv v10, v12, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v10, v12, v8, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %v8i32 = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -353,10 +354,11 @@ define <8 x i32> @v4i32_2(<4 x i32> %a, <4 x i32> %b) {
 define <8 x i32> @v8i32(<8 x i32> %a) {
 ; CHECK-LABEL: v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    vrsub.vi v12, v10, 7
-; CHECK-NEXT:    vrgather.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %v8i32 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -367,14 +369,17 @@ define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: v8i32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; CHECK-NEXT:    vid.v v20
-; CHECK-NEXT:    vrsub.vi v24, v20, 15
-; CHECK-NEXT:    vrgather.vv v12, v8, v24
-; CHECK-NEXT:    vrsub.vi v8, v20, 7
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v18, v10, 15
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v18
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vrsub.vi v8, v10, 7
 ; CHECK-NEXT:    li a0, 255
 ; CHECK-NEXT:    vmv.s.x v0, a0
-; CHECK-NEXT:    vrgather.vv v12, v16, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %v16i32 = shufflevector <8 x i32> %a, <8 x i32> %b,  <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -384,10 +389,11 @@ define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) {
 define <16 x i32> @v16i32(<16 x i32> %a) {
 ; CHECK-LABEL: v16i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vrsub.vi v16, v12, 15
-; CHECK-NEXT:    vrgather.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %v16i32 = shufflevector <16 x i32> %a, <16 x i32> undef,  <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -400,17 +406,20 @@ define <32 x i32> @v16i32_2(<16 x i32> %a, <16 x i32> %b) {
 ; RV32-NEXT:    lui a0, %hi(.LCPI23_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI23_0)
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-NEXT:    vle32.v v0, (a0)
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vle16.v v20, (a0)
 ; RV32-NEXT:    vmv4r.v v24, v12
 ; RV32-NEXT:    vmv4r.v v16, v8
-; RV32-NEXT:    vrgather.vv v8, v16, v0
+; RV32-NEXT:    vrgatherei16.vv v8, v16, v20
+; RV32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; RV32-NEXT:    vid.v v16
 ; RV32-NEXT:    vrsub.vi v16, v16, 15
 ; RV32-NEXT:    lui a0, 16
 ; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v0, a0
-; RV32-NEXT:    vrgather.vv v8, v24, v16, v0.t
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v8, v24, v16, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: v16i32_2:
@@ -418,17 +427,20 @@ define <32 x i32> @v16i32_2(<16 x i32> %a, <16 x i32> %b) {
 ; RV64-NEXT:    lui a0, %hi(.LCPI23_0)
 ; RV64-NEXT:    addi a0, a0, %lo(.LCPI23_0)
 ; RV64-NEXT:    li a1, 32
-; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-NEXT:    vle32.v v0, (a0)
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vle16.v v20, (a0)
 ; RV64-NEXT:    vmv4r.v v24, v12
 ; RV64-NEXT:    vmv4r.v v16, v8
-; RV64-NEXT:    vrgather.vv v8, v16, v0
+; RV64-NEXT:    vrgatherei16.vv v8, v16, v20
+; RV64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; RV64-NEXT:    vid.v v16
 ; RV64-NEXT:    vrsub.vi v16, v16, 15
 ; RV64-NEXT:    lui a0, 16
 ; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v0, a0
-; RV64-NEXT:    vrgather.vv v8, v24, v16, v0.t
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT:    vrgatherei16.vv v8, v24, v16, v0.t
 ; RV64-NEXT:    ret
   %v32i32 = shufflevector <16 x i32> %a, <16 x i32> %b,  <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <32 x i32> %v32i32
@@ -462,59 +474,35 @@ define <4 x i64> @v2i64_2(<2 x i64> %a, < 2 x i64> %b) {
 }
 
 define <4 x i64> @v4i64(<4 x i64> %a) {
-; RV32-LABEL: v4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    vrsub.vi v12, v10, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-NEXT:    vmv.v.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: v4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vid.v v10
-; RV64-NEXT:    vrsub.vi v12, v10, 3
-; RV64-NEXT:    vrgather.vv v10, v8, v12
-; RV64-NEXT:    vmv.v.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v12, v10, 3
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %v4i64 = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x i64> %v4i64
 }
 
 define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
-; RV32-LABEL: v4i64_2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv2r.v v16, v10
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    vrsub.vi v11, v10, 7
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v11
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vrsub.vi v8, v10, 3
-; RV32-NEXT:    vmv.v.i v0, 15
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: v4i64_2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv2r.v v16, v10
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vid.v v20
-; RV64-NEXT:    vrsub.vi v24, v20, 7
-; RV64-NEXT:    vrgather.vv v12, v8, v24
-; RV64-NEXT:    vrsub.vi v8, v20, 3
-; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT:    vmv.v.i v0, 15
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    vrgather.vv v12, v16, v8, v0.t
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: v4i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v11, v10, 7
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v11
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrsub.vi v8, v10, 3
+; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %v8i64 = shufflevector <4 x i64> %a, <4 x i64> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <8 x i64> %v8i64
 }
@@ -678,15 +666,16 @@ define <8 x float> @v4f32_2(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: v4f32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT:    vid.v v14
-; CHECK-NEXT:    vrsub.vi v16, v14, 7
-; CHECK-NEXT:    vrgather.vv v10, v8, v16
-; CHECK-NEXT:    vrsub.vi v8, v14, 3
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vrsub.vi v13, v9, 7
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v13
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrsub.vi v8, v9, 3
 ; CHECK-NEXT:    vmv.v.i v0, 15
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vrgather.vv v10, v12, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v10, v12, v8, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %v8f32 = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -696,10 +685,11 @@ define <8 x float> @v4f32_2(<4 x float> %a, <4 x float> %b) {
 define <8 x float> @v8f32(<8 x float> %a) {
 ; CHECK-LABEL: v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    vrsub.vi v12, v10, 7
-; CHECK-NEXT:    vrgather.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %v8f32 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -710,14 +700,17 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
 ; CHECK-LABEL: v8f32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; CHECK-NEXT:    vid.v v20
-; CHECK-NEXT:    vrsub.vi v24, v20, 15
-; CHECK-NEXT:    vrgather.vv v12, v8, v24
-; CHECK-NEXT:    vrsub.vi v8, v20, 7
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v18, v10, 15
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v18
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vrsub.vi v8, v10, 7
 ; CHECK-NEXT:    li a0, 255
 ; CHECK-NEXT:    vmv.s.x v0, a0
-; CHECK-NEXT:    vrgather.vv v12, v16, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %v16f32 = shufflevector <8 x float> %a, <8 x float> %b, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -752,59 +745,35 @@ define <4 x double> @v2f64_2(<2 x double> %a, < 2 x double> %b) {
 }
 
 define <4 x double> @v4f64(<4 x double> %a) {
-; RV32-LABEL: v4f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    vrsub.vi v12, v10, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-NEXT:    vmv.v.v v8, v10
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: v4f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vid.v v10
-; RV64-NEXT:    vrsub.vi v12, v10, 3
-; RV64-NEXT:    vrgather.vv v10, v8, v12
-; RV64-NEXT:    vmv.v.v v8, v10
-; RV64-NEXT:    ret
+; CHECK-LABEL: v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v12, v10, 3
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
   %v4f64 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x double> %v4f64
 }
 
 define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) {
-; RV32-LABEL: v4f64_2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv2r.v v16, v10
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    vrsub.vi v11, v10, 7
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v11
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vrsub.vi v8, v10, 3
-; RV32-NEXT:    vmv.v.i v0, 15
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
-; RV32-NEXT:    vmv.v.v v8, v12
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: v4f64_2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv2r.v v16, v10
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vid.v v20
-; RV64-NEXT:    vrsub.vi v24, v20, 7
-; RV64-NEXT:    vrgather.vv v12, v8, v24
-; RV64-NEXT:    vrsub.vi v8, v20, 3
-; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT:    vmv.v.i v0, 15
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    vrgather.vv v12, v16, v8, v0.t
-; RV64-NEXT:    vmv.v.v v8, v12
-; RV64-NEXT:    ret
+; CHECK-LABEL: v4f64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vrsub.vi v11, v10, 7
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v11
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrsub.vi v8, v10, 3
+; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
   %v8f64 = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <8 x double> %v8f64
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index d113c9fd31bbff9..afe6c958f1e64be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -76,8 +76,10 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
 ; RV64-NEXT:    lui a0, 12304
 ; RV64-NEXT:    addiw a0, a0, 512
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsext.vf8 v12, v10
-; RV64-NEXT:    vrgather.vv v10, v8, v12
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vsext.vf2 v12, v10
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
 	   %res = call <4 x i64> @llvm.experimental.vector.interleave2.v4i64(<2 x i64> %a, <2 x i64> %b)
@@ -180,8 +182,10 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double>
 ; RV64-NEXT:    lui a0, 12304
 ; RV64-NEXT:    addiw a0, a0, 512
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsext.vf8 v12, v10
-; RV64-NEXT:    vrgather.vv v10, v8, v12
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vsext.vf2 v12, v10
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
 	   %res = call <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double> %a, <2 x double> %b)

>From ae9507f62987b288e0831451f98a1b262349c438 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 15 Sep 2023 15:44:49 -0700
Subject: [PATCH 2/2] Address review comment

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 961c3942ce8976e..e3d6f52832feac1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4654,7 +4654,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
   // If the mask allows, we can do all the index computation in 16 bits.  This
   // requires less work and less register pressure at high LMUL, and creates
   // smaller constants which may be cheaper to materialize.
-  if (IndexVT.getScalarType().bitsGT(MVT::i16) && isUInt<16>(NumElts * 2) &&
+  if (IndexVT.getScalarType().bitsGT(MVT::i16) && isUInt<16>(NumElts - 1) &&
       (IndexVT.getSizeInBits() / Subtarget.getRealMinVLen()) > 1) {
     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
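
For context on the amended bound: vrgatherei16 consumes unsigned 16-bit indices, so isUInt<16>(NumElts - 1) checks that the largest index a gather can name still fits in 16 bits, and the final clause fires only when the wider index vector would span more than one register. Below is a minimal standalone sketch of that LMUL arithmetic — it is not part of the patch, and RealMinVLen/NumElts are assumed example values chosen for illustration:

    // Standalone sketch of the LMUL savings the guard is after: with an
    // assumed VLEN of 128, a <16 x i64> shuffle needs an LMUL=8 index vector
    // at e64 but only LMUL=2 at e16, which is where the register-pressure
    // and constant-materialization benefits come from.
    #include <cstdio>
    #include <initializer_list>

    int main() {
      const unsigned RealMinVLen = 128; // assumed minimum VLEN in bits
      const unsigned NumElts = 16;      // assumed fixed-vector element count

      for (unsigned IndexEltBits : {64u, 16u}) {
        unsigned IndexBits = NumElts * IndexEltBits;           // index vector size
        unsigned LMUL = (IndexBits + RealMinVLen - 1) / RealMinVLen;
        std::printf("e%u indices: %u bits -> LMUL m%u\n",
                    IndexEltBits, IndexBits, LMUL);
      }
      return 0;
    }

Running the sketch prints LMUL m8 for the e64 indices and m2 for the e16 indices, matching the vsetvli changes visible throughout the updated tests (e.g. e64,m8 index setup replaced by e16,m2).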


