[llvm] [RISCV] Support llvm.masked.expandload intrinsic (PR #101954)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 6 00:14:04 PDT 2024


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/101954

From 81cf472f7806f22eb367b7424e26f0b0cb0e30ae Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Mon, 5 Aug 2024 17:30:35 +0800
Subject: [PATCH 1/2] [RISCV] Support llvm.masked.expandload intrinsic

We can use `viota.m` + indexed load to synthesize an expanding load:
```
%res = llvm.masked.expandload(%ptr, %mask, %passthru)
->
%index = viota %mask
if elt_size > 8:
  %index = vsll.vi %index, log2(elt_size / 8), %mask
%res = vluxei<n> %passthru, %ptr, %index, %mask
```

If `%mask` is all ones, we can lower the expanding load to a normal
unmasked load.
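
For example, for the `<4 x i32>` case (copied from the `test_expandload_v4i32`
test added below), the masked form:
```
%res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %passthru)
```
is lowered to (RV64):
```
vsetivli zero, 4, e32, m1, ta, ma
viota.m v9, v0
vsll.vi v9, v9, 2, v0.t
vsetvli zero, zero, e32, m1, ta, mu
vluxei32.v v8, (a0), v9, v0.t
```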

Fixes #101914
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   30 +-
 .../Target/RISCV/RISCVTargetTransformInfo.cpp |   10 +
 .../Target/RISCV/RISCVTargetTransformInfo.h   |    2 +
 llvm/test/CodeGen/RISCV/rvv/expandload.ll     | 1549 +++++++++++++++++
 .../RISCV/rvv/fixed-vectors-expandload-fp.ll  | 1046 ++---------
 .../RISCV/rvv/fixed-vectors-expandload-int.ll |  917 +---------
 6 files changed, 1768 insertions(+), 1786 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/expandload.ll

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9ee60b9db2837..f6a69e427252b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10733,6 +10733,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
   SDValue BasePtr = MemSD->getBasePtr();
 
   SDValue Mask, PassThru, VL;
+  bool IsExpandingLoad = false;
   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
     Mask = VPLoad->getMask();
     PassThru = DAG.getUNDEF(VT);
@@ -10741,6 +10742,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
     Mask = MLoad->getMask();
     PassThru = MLoad->getPassThru();
+    IsExpandingLoad = MLoad->isExpandingLoad();
   }
 
   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
@@ -10760,16 +10762,38 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
   if (!VL)
     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
-  unsigned IntID =
-      IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
+  SDValue Index;
+  if (!IsUnmasked && IsExpandingLoad) {
+    MVT IndexVT = ContainerVT;
+    if (ContainerVT.isFloatingPoint())
+      IndexVT = IndexVT.changeVectorElementTypeToInteger();
+
+    if (Subtarget.isRV32() && IndexVT.getVectorElementType().bitsGT(XLenVT))
+      IndexVT = IndexVT.changeVectorElementType(XLenVT);
+
+    Index = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
+                        DAG.getConstant(Intrinsic::riscv_viota, DL, XLenVT),
+                        DAG.getUNDEF(IndexVT), Mask, VL);
+    if (uint64_t EltSize = ContainerVT.getScalarSizeInBits(); EltSize > 8)
+      Index = DAG.getNode(RISCVISD::SHL_VL, DL, IndexVT, Index,
+                          DAG.getConstant(Log2_64(EltSize / 8), DL, IndexVT),
+                          DAG.getUNDEF(IndexVT), Mask, VL);
+  }
+
+  unsigned IntID = IsUnmasked        ? Intrinsic::riscv_vle
+                   : IsExpandingLoad ? Intrinsic::riscv_vluxei_mask
+                                     : Intrinsic::riscv_vle_mask;
   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
   if (IsUnmasked)
     Ops.push_back(DAG.getUNDEF(ContainerVT));
   else
     Ops.push_back(PassThru);
   Ops.push_back(BasePtr);
-  if (!IsUnmasked)
+  if (!IsUnmasked) {
+    if (IsExpandingLoad)
+      Ops.push_back(Index);
     Ops.push_back(Mask);
+  }
   Ops.push_back(VL);
   if (!IsUnmasked)
     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 4cd904c039a98..41b39accbf027 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1966,6 +1966,16 @@ bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                   C2.ScaleCost, C2.ImmCost, C2.SetupCost);
 }
 
+bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) {
+  auto *VTy = dyn_cast<VectorType>(DataTy);
+  if (!VTy || VTy->isScalableTy())
+    return false;
+
+  if (!isLegalMaskedLoadStore(DataTy, Alignment))
+    return false;
+  return true;
+}
+
 bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) {
   auto *VTy = dyn_cast<VectorType>(DataTy);
   if (!VTy || VTy->isScalableTy())
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 9c37a4f6ec2d0..192bb35613aad 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -281,6 +281,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
     return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
   }
 
+  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
+
   bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);
 
   bool isVScaleKnownToBeAPowerOfTwo() const {
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
new file mode 100644
index 0000000000000..ae51beb027285
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -0,0 +1,1549 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV64
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV32
+
+; Load + expand for i8 type
+
+define <1 x i8> @test_expandload_v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v1i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr align 1 %base, <1 x i1> %mask, <1 x i8> %passthru)
+  ret <1 x i8> %res
+}
+
+define <1 x i8> @test_expandload_v1i8_all_ones(ptr %base, <1 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v1i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr align 1 %base, <1 x i1> splat (i1 true), <1 x i8> %passthru)
+  ret <1 x i8> %res
+}
+
+define <2 x i8> @test_expandload_v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr align 1 %base, <2 x i1> %mask, <2 x i8> %passthru)
+  ret <2 x i8> %res
+}
+
+define <2 x i8> @test_expandload_v2i8_all_ones(ptr %base, <2 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v2i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr align 1 %base, <2 x i1> splat (i1 true), <2 x i8> %passthru)
+  ret <2 x i8> %res
+}
+
+define <4 x i8> @test_expandload_v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr align 1 %base, <4 x i1> %mask, <4 x i8> %passthru)
+  ret <4 x i8> %res
+}
+
+define <4 x i8> @test_expandload_v4i8_all_ones(ptr %base, <4 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v4i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr align 1 %base, <4 x i1> splat (i1 true), <4 x i8> %passthru)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @test_expandload_v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr align 1 %base, <8 x i1> %mask, <8 x i8> %passthru)
+  ret <8 x i8> %res
+}
+
+define <8 x i8> @test_expandload_v8i8_all_ones(ptr %base, <8 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v8i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr align 1 %base, <8 x i1> splat (i1 true), <8 x i8> %passthru)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @test_expandload_v16i8(ptr %base, <16 x i1> %mask, <16 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v16i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <16 x i8> @llvm.masked.expandload.v16i8(ptr align 1 %base, <16 x i1> %mask, <16 x i8> %passthru)
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_expandload_v16i8_all_ones(ptr %base, <16 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v16i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <16 x i8> @llvm.masked.expandload.v16i8(ptr align 1 %base, <16 x i1> splat (i1 true), <16 x i8> %passthru)
+  ret <16 x i8> %res
+}
+
+define <32 x i8> @test_expandload_v32i8(ptr %base, <32 x i1> %mask, <32 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v32i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v10, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v10, v0.t
+; RV32-NEXT:    ret
+  %res = call <32 x i8> @llvm.masked.expandload.v32i8(ptr align 1 %base, <32 x i1> %mask, <32 x i8> %passthru)
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_expandload_v32i8_all_ones(ptr %base, <32 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v32i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <32 x i8> @llvm.masked.expandload.v32i8(ptr align 1 %base, <32 x i1> splat (i1 true), <32 x i8> %passthru)
+  ret <32 x i8> %res
+}
+
+define <64 x i8> @test_expandload_v64i8(ptr %base, <64 x i1> %mask, <64 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v64i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; RV64-NEXT:    viota.m v12, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v12, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v64i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; RV32-NEXT:    viota.m v12, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v12, v0.t
+; RV32-NEXT:    ret
+  %res = call <64 x i8> @llvm.masked.expandload.v64i8(ptr align 1 %base, <64 x i1> %mask, <64 x i8> %passthru)
+  ret <64 x i8> %res
+}
+
+define <64 x i8> @test_expandload_v64i8_all_ones(ptr %base, <64 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v64i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v64i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <64 x i8> @llvm.masked.expandload.v64i8(ptr align 1 %base, <64 x i1> splat (i1 true), <64 x i8> %passthru)
+  ret <64 x i8> %res
+}
+
+define <128 x i8> @test_expandload_v128i8(ptr %base, <128 x i1> %mask, <128 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v128i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vluxei8.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v128i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 128
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV32-NEXT:    viota.m v16, v0
+; RV32-NEXT:    vluxei8.v v8, (a0), v16, v0.t
+; RV32-NEXT:    ret
+  %res = call <128 x i8> @llvm.masked.expandload.v128i8(ptr align 1 %base, <128 x i1> %mask, <128 x i8> %passthru)
+  ret <128 x i8> %res
+}
+
+define <128 x i8> @test_expandload_v128i8_all_ones(ptr %base, <128 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v128i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v128i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 128
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <128 x i8> @llvm.masked.expandload.v128i8(ptr align 1 %base, <128 x i1> splat (i1 true), <128 x i8> %passthru)
+  ret <128 x i8> %res
+}
+
+define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v256i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 4
+; RV64-NEXT:    sub sp, sp, a2
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    vmv1r.v v9, v0
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vle8.v v16, (a1)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v0, 1
+; RV64-NEXT:    vmv.x.s a1, v10
+; RV64-NEXT:    vmv.x.s a3, v0
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
+; RV64-NEXT:    viota.m v24, v8
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    addi a2, a2, 16
+; RV64-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    cpop a2, a3
+; RV64-NEXT:    cpop a1, a1
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    vmv1r.v v0, v8
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    addi a2, a2, 16
+; RV64-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; RV64-NEXT:    vluxei8.v v16, (a1), v24, v0.t
+; RV64-NEXT:    viota.m v24, v9
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT:    vluxei8.v v8, (a0), v24, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v256i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 4
+; RV32-NEXT:    sub sp, sp, a2
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vmv1r.v v9, v0
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vle8.v v16, (a1)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v0, 1
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v11, v10, a1
+; RV32-NEXT:    vmv.x.s a3, v11
+; RV32-NEXT:    vsrl.vx v11, v0, a1
+; RV32-NEXT:    vmv.x.s a1, v11
+; RV32-NEXT:    vmv.x.s a4, v10
+; RV32-NEXT:    vmv.x.s a5, v0
+; RV32-NEXT:    cpop a1, a1
+; RV32-NEXT:    cpop a5, a5
+; RV32-NEXT:    add a1, a5, a1
+; RV32-NEXT:    cpop a3, a3
+; RV32-NEXT:    cpop a4, a4
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    add a1, a0, a1
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
+; RV32-NEXT:    viota.m v24, v8
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vmv1r.v v0, v8
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vluxei8.v v16, (a1), v24, v0.t
+; RV32-NEXT:    viota.m v24, v9
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vluxei8.v v8, (a0), v24, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+  %res = call <256 x i8> @llvm.masked.expandload.v256i8(ptr align 1 %base, <256 x i1> %mask, <256 x i8> %passthru)
+  ret <256 x i8> %res
+}
+
+define <256 x i8> @test_expandload_v256i8_all_ones(ptr %base, <256 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v256i8_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    vmset.m v16
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v16
+; RV64-NEXT:    cpop a2, a2
+; RV64-NEXT:    vslidedown.vi v16, v16, 1
+; RV64-NEXT:    vmv.x.s a3, v16
+; RV64-NEXT:    cpop a3, a3
+; RV64-NEXT:    add a0, a0, a2
+; RV64-NEXT:    add a0, a0, a3
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vle8.v v16, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v256i8_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 128
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vmset.m v8
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v9, v8, a2
+; RV32-NEXT:    vmv.x.s a3, v9
+; RV32-NEXT:    cpop a3, a3
+; RV32-NEXT:    vmv.x.s a4, v8
+; RV32-NEXT:    cpop a4, a4
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vsrl.vx v9, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v9
+; RV32-NEXT:    cpop a2, a2
+; RV32-NEXT:    vmv.x.s a4, v8
+; RV32-NEXT:    cpop a4, a4
+; RV32-NEXT:    add a2, a4, a2
+; RV32-NEXT:    add a3, a0, a3
+; RV32-NEXT:    add a2, a3, a2
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vle8.v v16, (a2)
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <256 x i8> @llvm.masked.expandload.v256i8(ptr align 1 %base, <256 x i1> splat (i1 true), <256 x i8> %passthru)
+  ret <256 x i8> %res
+}
+
+declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
+declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
+declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.expandload.v16i8(ptr, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.expandload.v32i8(ptr, <32 x i1>, <32 x i8>)
+declare <64 x i8> @llvm.masked.expandload.v64i8(ptr, <64 x i1>, <64 x i8>)
+declare <128 x i8> @llvm.masked.expandload.v128i8(ptr, <128 x i1>, <128 x i8>)
+declare <256 x i8> @llvm.masked.expandload.v256i8(ptr, <256 x i1>, <256 x i8>)
+
+; Load + expand for i16 type
+
+define <1 x i16> @test_expandload_v1i16(ptr %base, <1 x i1> %mask, <1 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %passthru)
+  ret <1 x i16> %res
+}
+
+define <1 x i16> @test_expandload_v1i16_all_ones(ptr %base, <1 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v1i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> splat (i1 true), <1 x i16> %passthru)
+  ret <1 x i16> %res
+}
+
+define <2 x i16> @test_expandload_v2i16(ptr %base, <2 x i1> %mask, <2 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %passthru)
+  ret <2 x i16> %res
+}
+
+define <2 x i16> @test_expandload_v2i16_all_ones(ptr %base, <2 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v2i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> splat (i1 true), <2 x i16> %passthru)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @test_expandload_v4i16(ptr %base, <4 x i1> %mask, <4 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %passthru)
+  ret <4 x i16> %res
+}
+
+define <4 x i16> @test_expandload_v4i16_all_ones(ptr %base, <4 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v4i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> splat (i1 true), <4 x i16> %passthru)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @test_expandload_v8i16(ptr %base, <8 x i1> %mask, <8 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %passthru)
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_expandload_v8i16_all_ones(ptr %base, <8 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v8i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> splat (i1 true), <8 x i16> %passthru)
+  ret <8 x i16> %res
+}
+
+define <16 x i16> @test_expandload_v16i16(ptr %base, <16 x i1> %mask, <16 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v16i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vsll.vi v10, v10, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v10, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vsll.vi v10, v10, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v10, v0.t
+; RV32-NEXT:    ret
+  %res = call <16 x i16> @llvm.masked.expandload.v16i16(ptr align 2 %base, <16 x i1> %mask, <16 x i16> %passthru)
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_expandload_v16i16_all_ones(ptr %base, <16 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v16i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <16 x i16> @llvm.masked.expandload.v16i16(ptr align 2 %base, <16 x i1> splat (i1 true), <16 x i16> %passthru)
+  ret <16 x i16> %res
+}
+
+define <32 x i16> @test_expandload_v32i16(ptr %base, <32 x i1> %mask, <32 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v32i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT:    viota.m v12, v0
+; RV64-NEXT:    vsll.vi v12, v12, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v12, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT:    viota.m v12, v0
+; RV32-NEXT:    vsll.vi v12, v12, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v12, v0.t
+; RV32-NEXT:    ret
+  %res = call <32 x i16> @llvm.masked.expandload.v32i16(ptr align 2 %base, <32 x i1> %mask, <32 x i16> %passthru)
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_expandload_v32i16_all_ones(ptr %base, <32 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v32i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <32 x i16> @llvm.masked.expandload.v32i16(ptr align 2 %base, <32 x i1> splat (i1 true), <32 x i16> %passthru)
+  ret <32 x i16> %res
+}
+
+define <64 x i16> @test_expandload_v64i16(ptr %base, <64 x i1> %mask, <64 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v64i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsll.vi v16, v16, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v64i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    viota.m v16, v0
+; RV32-NEXT:    vsll.vi v16, v16, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT:    ret
+  %res = call <64 x i16> @llvm.masked.expandload.v64i16(ptr align 2 %base, <64 x i1> %mask, <64 x i16> %passthru)
+  ret <64 x i16> %res
+}
+
+define <64 x i16> @test_expandload_v64i16_all_ones(ptr %base, <64 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v64i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v64i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <64 x i16> @llvm.masked.expandload.v64i16(ptr align 2 %base, <64 x i1> splat (i1 true), <64 x i16> %passthru)
+  ret <64 x i16> %res
+}
+
+define <128 x i16> @test_expandload_v128i16(ptr %base, <128 x i1> %mask, <128 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v128i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 4
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vmv1r.v v7, v0
+; RV64-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v0, v0, 8
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsll.vi v16, v16, 1, v0.t
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v7
+; RV64-NEXT:    cpop a2, a2
+; RV64-NEXT:    slli a2, a2, 1
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    csrr a3, vlenb
+; RV64-NEXT:    slli a3, a3, 3
+; RV64-NEXT:    add a3, sp, a3
+; RV64-NEXT:    addi a3, a3, 16
+; RV64-NEXT:    vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV64-NEXT:    addi a3, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT:    vluxei16.v v16, (a2), v24, v0.t
+; RV64-NEXT:    viota.m v24, v7
+; RV64-NEXT:    vmv1r.v v0, v7
+; RV64-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
+; RV64-NEXT:    vsll.vi v24, v24, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v24, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v128i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vmv1r.v v24, v0
+; RV32-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v0, v0, 8
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    viota.m v8, v0
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v8, v24, a2
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    cpop a2, a2
+; RV32-NEXT:    vmv.x.s a3, v24
+; RV32-NEXT:    cpop a3, a3
+; RV32-NEXT:    add a2, a3, a2
+; RV32-NEXT:    slli a2, a2, 1
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    addi a3, sp, 16
+; RV32-NEXT:    vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT:    vluxei16.v v16, (a2), v8, v0.t
+; RV32-NEXT:    viota.m v8, v24
+; RV32-NEXT:    vmv1r.v v0, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
+; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v24, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+  %res = call <128 x i16> @llvm.masked.expandload.v128i16(ptr align 2 %base, <128 x i1> %mask, <128 x i16> %passthru)
+  ret <128 x i16> %res
+}
+
+define <128 x i16> @test_expandload_v128i16_all_ones(ptr %base, <128 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v128i16_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    vmset.m v16
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v16
+; RV64-NEXT:    cpop a2, a2
+; RV64-NEXT:    slli a2, a2, 1
+; RV64-NEXT:    add a0, a0, a2
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vle16.v v16, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v128i16_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    vmset.m v16
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v17, v16, a2
+; RV32-NEXT:    vmv.x.s a2, v17
+; RV32-NEXT:    cpop a2, a2
+; RV32-NEXT:    vmv.x.s a3, v16
+; RV32-NEXT:    cpop a3, a3
+; RV32-NEXT:    add a2, a3, a2
+; RV32-NEXT:    slli a2, a2, 1
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vle16.v v16, (a0)
+; RV32-NEXT:    ret
+  %res = call <128 x i16> @llvm.masked.expandload.v128i16(ptr align 2 %base, <128 x i1> splat (i1 true), <128 x i16> %passthru)
+  ret <128 x i16> %res
+}
+
+declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
+declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
+declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
+declare <16 x i16> @llvm.masked.expandload.v16i16(ptr, <16 x i1>, <16 x i16>)
+declare <32 x i16> @llvm.masked.expandload.v32i16(ptr, <32 x i1>, <32 x i16>)
+declare <64 x i16> @llvm.masked.expandload.v64i16(ptr, <64 x i1>, <64 x i16>)
+declare <128 x i16> @llvm.masked.expandload.v128i16(ptr, <128 x i1>, <128 x i16>)
+
+; Load + expand for i32 type
+
+define <1 x i32> @test_expandload_v1i32(ptr %base, <1 x i1> %mask, <1 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %passthru)
+  ret <1 x i32> %res
+}
+
+define <1 x i32> @test_expandload_v1i32_all_ones(ptr %base, <1 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v1i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> splat (i1 true), <1 x i32> %passthru)
+  ret <1 x i32> %res
+}
+
+define <2 x i32> @test_expandload_v2i32(ptr %base, <2 x i1> %mask, <2 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %passthru)
+  ret <2 x i32> %res
+}
+
+define <2 x i32> @test_expandload_v2i32_all_ones(ptr %base, <2 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v2i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> splat (i1 true), <2 x i32> %passthru)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @test_expandload_v4i32(ptr %base, <4 x i1> %mask, <4 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %passthru)
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_expandload_v4i32_all_ones(ptr %base, <4 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v4i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> splat (i1 true), <4 x i32> %passthru)
+  ret <4 x i32> %res
+}
+
+define <8 x i32> @test_expandload_v8i32(ptr %base, <8 x i1> %mask, <8 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vsll.vi v10, v10, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vsll.vi v10, v10, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; RV32-NEXT:    ret
+  %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %passthru)
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @test_expandload_v8i32_all_ones(ptr %base, <8 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v8i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> splat (i1 true), <8 x i32> %passthru)
+  ret <8 x i32> %res
+}
+
+define <16 x i32> @test_expandload_v16i32(ptr %base, <16 x i1> %mask, <16 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v16i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT:    viota.m v12, v0
+; RV64-NEXT:    vsll.vi v12, v12, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    viota.m v12, v0
+; RV32-NEXT:    vsll.vi v12, v12, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT:    ret
+  %res = call <16 x i32> @llvm.masked.expandload.v16i32(ptr align 4 %base, <16 x i1> %mask, <16 x i32> %passthru)
+  ret <16 x i32> %res
+}
+
+define <16 x i32> @test_expandload_v16i32_all_ones(ptr %base, <16 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v16i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <16 x i32> @llvm.masked.expandload.v16i32(ptr align 4 %base, <16 x i1> splat (i1 true), <16 x i32> %passthru)
+  ret <16 x i32> %res
+}
+
+define <32 x i32> @test_expandload_v32i32(ptr %base, <32 x i1> %mask, <32 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v32i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    viota.m v16, v0
+; RV32-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT:    ret
+  %res = call <32 x i32> @llvm.masked.expandload.v32i32(ptr align 4 %base, <32 x i1> %mask, <32 x i32> %passthru)
+  ret <32 x i32> %res
+}
+
+define <32 x i32> @test_expandload_v32i32_all_ones(ptr %base, <32 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v32i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <32 x i32> @llvm.masked.expandload.v32i32(ptr align 4 %base, <32 x i1> splat (i1 true), <32 x i32> %passthru)
+  ret <32 x i32> %res
+}
+
+define <64 x i32> @test_expandload_v64i32(ptr %base, <64 x i1> %mask, <64 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v64i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 4
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vmv1r.v v7, v0
+; RV64-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v0, v0, 4
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vmv.x.s a1, v7
+; RV64-NEXT:    cpopw a1, a1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a1, a0, a1
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    addi a2, a2, 16
+; RV64-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; RV64-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; RV64-NEXT:    vluxei32.v v16, (a1), v24, v0.t
+; RV64-NEXT:    viota.m v24, v7
+; RV64-NEXT:    vmv1r.v v0, v7
+; RV64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV64-NEXT:    vsll.vi v24, v24, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v24, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v64i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 4
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vmv1r.v v7, v0
+; RV32-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v0, v0, 4
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    viota.m v16, v0
+; RV32-NEXT:    vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vmv.x.s a1, v7
+; RV32-NEXT:    cpop a1, a1
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a1, a0, a1
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v16, (a1), v24, v0.t
+; RV32-NEXT:    viota.m v24, v7
+; RV32-NEXT:    vmv1r.v v0, v7
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT:    vsll.vi v24, v24, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+  %res = call <64 x i32> @llvm.masked.expandload.v64i32(ptr align 4 %base, <64 x i1> %mask, <64 x i32> %passthru)
+  ret <64 x i32> %res
+}
+
+define <64 x i32> @test_expandload_v64i32_all_ones(ptr %base, <64 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v64i32_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    vle32.v v16, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v64i32_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:    vmset.m v16
+; RV32-NEXT:    vmv.x.s a1, v16
+; RV32-NEXT:    cpop a1, a1
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    vle32.v v16, (a0)
+; RV32-NEXT:    ret
+  %res = call <64 x i32> @llvm.masked.expandload.v64i32(ptr align 4 %base, <64 x i1> splat (i1 true), <64 x i32> %passthru)
+  ret <64 x i32> %res
+}
+
+declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
+declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
+declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
+declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
+declare <16 x i32> @llvm.masked.expandload.v16i32(ptr, <16 x i1>, <16 x i32>)
+declare <32 x i32> @llvm.masked.expandload.v32i32(ptr, <32 x i1>, <32 x i32>)
+declare <64 x i32> @llvm.masked.expandload.v64i32(ptr, <64 x i1>, <64 x i32>)
+
+; Load + expand for i64 type
+
+define <1 x i64> @test_expandload_v1i64(ptr %base, <1 x i1> %mask, <1 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %passthru)
+  ret <1 x i64> %res
+}
+
+define <1 x i64> @test_expandload_v1i64_all_ones(ptr %base, <1 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v1i64_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v1i64_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> splat (i1 true), <1 x i64> %passthru)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @test_expandload_v2i64(ptr %base, <2 x i1> %mask, <2 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v9, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; RV32-NEXT:    ret
+  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %passthru)
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @test_expandload_v2i64_all_ones(ptr %base, <2 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v2i64_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v2i64_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> splat (i1 true), <2 x i64> %passthru)
+  ret <2 x i64> %res
+}
+
+define <4 x i64> @test_expandload_v4i64(ptr %base, <4 x i1> %mask, <4 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vsll.vi v10, v10, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v10, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vsll.vi v10, v10, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; RV32-NEXT:    ret
+  %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %passthru)
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @test_expandload_v4i64_all_ones(ptr %base, <4 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v4i64_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v4i64_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> splat (i1 true), <4 x i64> %passthru)
+  ret <4 x i64> %res
+}
+
+define <8 x i64> @test_expandload_v8i64(ptr %base, <8 x i1> %mask, <8 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    viota.m v12, v0
+; RV64-NEXT:    vsll.vi v12, v12, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v12, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    viota.m v12, v0
+; RV32-NEXT:    vsll.vi v12, v12, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT:    ret
+  %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %passthru)
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @test_expandload_v8i64_all_ones(ptr %base, <8 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v8i64_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v8i64_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> splat (i1 true), <8 x i64> %passthru)
+  ret <8 x i64> %res
+}
+
+define <16 x i64> @test_expandload_v16i64(ptr %base, <16 x i1> %mask, <16 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v16i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsll.vi v16, v16, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    viota.m v16, v0
+; RV32-NEXT:    vsll.vi v16, v16, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT:    ret
+  %res = call <16 x i64> @llvm.masked.expandload.v16i64(ptr align 8 %base, <16 x i1> %mask, <16 x i64> %passthru)
+  ret <16 x i64> %res
+}
+
+define <16 x i64> @test_expandload_v16i64_all_ones(ptr %base, <16 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v16i64_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v16i64_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    ret
+  %res = call <16 x i64> @llvm.masked.expandload.v16i64(ptr align 8 %base, <16 x i1> splat (i1 true), <16 x i64> %passthru)
+  ret <16 x i64> %res
+}
+
+define <32 x i64> @test_expandload_v32i64(ptr %base, <32 x i1> %mask, <32 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v32i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 4
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vmv1r.v v7, v0
+; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT:    vslidedown.vi v0, v0, 2
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsll.vi v16, v16, 3, v0.t
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v7
+; RV64-NEXT:    zext.h a1, a1
+; RV64-NEXT:    cpopw a1, a1
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, a0, a1
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    addi a2, a2, 16
+; RV64-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v16, (a1), v24, v0.t
+; RV64-NEXT:    viota.m v24, v7
+; RV64-NEXT:    vmv1r.v v0, v7
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vsll.vi v24, v24, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v24, v0.t
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vmv1r.v v24, v0
+; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT:    vslidedown.vi v0, v0, 2
+; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT:    viota.m v28, v0
+; RV32-NEXT:    vsll.vi v28, v28, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v24
+; RV32-NEXT:    zext.h a1, a1
+; RV32-NEXT:    cpop a1, a1
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, a0, a1
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v16, (a1), v28, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    viota.m v28, v24
+; RV32-NEXT:    vmv1r.v v0, v24
+; RV32-NEXT:    vsll.vi v28, v28, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+  %res = call <32 x i64> @llvm.masked.expandload.v32i64(ptr align 8 %base, <32 x i1> %mask, <32 x i64> %passthru)
+  ret <32 x i64> %res
+}
+
+define <32 x i64> @test_expandload_v32i64_all_ones(ptr %base, <32 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v32i64_all_ones:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    vle64.v v16, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v32i64_all_ones:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    vle64.v v16, (a0)
+; RV32-NEXT:    ret
+  %res = call <32 x i64> @llvm.masked.expandload.v32i64(ptr align 8 %base, <32 x i1> splat (i1 true), <32 x i64> %passthru)
+  ret <32 x i64> %res
+}
+
+declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
+declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
+declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
+declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
+declare <16 x i64> @llvm.masked.expandload.v16i64(ptr, <16 x i1>, <16 x i64>)
+declare <32 x i64> @llvm.masked.expandload.v32i64(ptr, <32 x i1>, <32 x i64>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
index 8b31166e313de..a83036b2f9342 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
@@ -6,24 +6,20 @@ declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>)
 define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT:    vfirst.m a1, v0
-; RV32-NEXT:    bnez a1, .LBB0_2
-; RV32-NEXT:  # %bb.1: # %cond.load
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:  .LBB0_2: # %else
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v1f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT:    vfirst.m a1, v0
-; RV64-NEXT:    bnez a1, .LBB0_2
-; RV64-NEXT:  # %bb.1: # %cond.load
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:  .LBB0_2: # %else
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <1 x half> @llvm.masked.expandload.v1f16(ptr align 2 %base, <1 x i1> %mask, <1 x half> %src0)
   ret <1 x half>%res
@@ -33,54 +29,20 @@ declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>)
 define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB1_3
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    bnez a1, .LBB1_4
-; RV32-NEXT:  .LBB1_2: # %else2
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB1_3: # %cond.load
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    beqz a1, .LBB1_2
-; RV32-NEXT:  .LBB1_4: # %cond.load1
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v2f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB1_3
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    bnez a1, .LBB1_4
-; RV64-NEXT:  .LBB1_2: # %else2
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB1_3: # %cond.load
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    beqz a1, .LBB1_2
-; RV64-NEXT:  .LBB1_4: # %cond.load1
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <2 x half> @llvm.masked.expandload.v2f16(ptr align 2 %base, <2 x i1> %mask, <2 x half> %src0)
   ret <2 x half>%res
@@ -90,98 +52,20 @@ declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>)
 define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB2_5
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB2_6
-; RV32-NEXT:  .LBB2_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB2_7
-; RV32-NEXT:  .LBB2_3: # %else6
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    bnez a1, .LBB2_8
-; RV32-NEXT:  .LBB2_4: # %else10
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB2_5: # %cond.load
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB2_2
-; RV32-NEXT:  .LBB2_6: # %cond.load1
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB2_3
-; RV32-NEXT:  .LBB2_7: # %cond.load5
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 2
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    beqz a1, .LBB2_4
-; RV32-NEXT:  .LBB2_8: # %cond.load9
-; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v4f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB2_5
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB2_6
-; RV64-NEXT:  .LBB2_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB2_7
-; RV64-NEXT:  .LBB2_3: # %else6
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    bnez a1, .LBB2_8
-; RV64-NEXT:  .LBB2_4: # %else10
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB2_5: # %cond.load
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB2_2
-; RV64-NEXT:  .LBB2_6: # %cond.load1
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB2_3
-; RV64-NEXT:  .LBB2_7: # %cond.load5
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 2
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    beqz a1, .LBB2_4
-; RV64-NEXT:  .LBB2_8: # %cond.load9
-; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <4 x half> @llvm.masked.expandload.v4f16(ptr align 2 %base, <4 x i1> %mask, <4 x half> %src0)
   ret <4 x half>%res
@@ -191,186 +75,20 @@ declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>)
 define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB3_9
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB3_10
-; RV32-NEXT:  .LBB3_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB3_11
-; RV32-NEXT:  .LBB3_3: # %else6
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    bnez a2, .LBB3_12
-; RV32-NEXT:  .LBB3_4: # %else10
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    bnez a2, .LBB3_13
-; RV32-NEXT:  .LBB3_5: # %else14
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    bnez a2, .LBB3_14
-; RV32-NEXT:  .LBB3_6: # %else18
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    bnez a2, .LBB3_15
-; RV32-NEXT:  .LBB3_7: # %else22
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    bnez a1, .LBB3_16
-; RV32-NEXT:  .LBB3_8: # %else26
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB3_9: # %cond.load
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB3_2
-; RV32-NEXT:  .LBB3_10: # %cond.load1
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB3_3
-; RV32-NEXT:  .LBB3_11: # %cond.load5
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 2
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    beqz a2, .LBB3_4
-; RV32-NEXT:  .LBB3_12: # %cond.load9
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 3
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    beqz a2, .LBB3_5
-; RV32-NEXT:  .LBB3_13: # %cond.load13
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 4
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    beqz a2, .LBB3_6
-; RV32-NEXT:  .LBB3_14: # %cond.load17
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 5
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    beqz a2, .LBB3_7
-; RV32-NEXT:  .LBB3_15: # %cond.load21
-; RV32-NEXT:    flh fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 6
-; RV32-NEXT:    addi a0, a0, 2
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    beqz a1, .LBB3_8
-; RV32-NEXT:  .LBB3_16: # %cond.load25
-; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 7
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v8f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB3_9
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB3_10
-; RV64-NEXT:  .LBB3_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB3_11
-; RV64-NEXT:  .LBB3_3: # %else6
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    bnez a2, .LBB3_12
-; RV64-NEXT:  .LBB3_4: # %else10
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    bnez a2, .LBB3_13
-; RV64-NEXT:  .LBB3_5: # %else14
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    bnez a2, .LBB3_14
-; RV64-NEXT:  .LBB3_6: # %else18
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    bnez a2, .LBB3_15
-; RV64-NEXT:  .LBB3_7: # %else22
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    bnez a1, .LBB3_16
-; RV64-NEXT:  .LBB3_8: # %else26
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB3_9: # %cond.load
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB3_2
-; RV64-NEXT:  .LBB3_10: # %cond.load1
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB3_3
-; RV64-NEXT:  .LBB3_11: # %cond.load5
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 2
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    beqz a2, .LBB3_4
-; RV64-NEXT:  .LBB3_12: # %cond.load9
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 3
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    beqz a2, .LBB3_5
-; RV64-NEXT:  .LBB3_13: # %cond.load13
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 4
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    beqz a2, .LBB3_6
-; RV64-NEXT:  .LBB3_14: # %cond.load17
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 5
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    beqz a2, .LBB3_7
-; RV64-NEXT:  .LBB3_15: # %cond.load21
-; RV64-NEXT:    flh fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 6
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    beqz a1, .LBB3_8
-; RV64-NEXT:  .LBB3_16: # %cond.load25
-; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 7
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 1, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <8 x half> @llvm.masked.expandload.v8f16(ptr align 2 %base, <8 x i1> %mask, <8 x half> %src0)
   ret <8 x half>%res
@@ -380,24 +98,20 @@ declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>)
 define <1 x float> @expandload_v1f32(ptr %base, <1 x float> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT:    vfirst.m a1, v0
-; RV32-NEXT:    bnez a1, .LBB4_2
-; RV32-NEXT:  # %bb.1: # %cond.load
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:  .LBB4_2: # %else
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v1f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT:    vfirst.m a1, v0
-; RV64-NEXT:    bnez a1, .LBB4_2
-; RV64-NEXT:  # %bb.1: # %cond.load
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:  .LBB4_2: # %else
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <1 x float> @llvm.masked.expandload.v1f32(ptr align 4 %base, <1 x i1> %mask, <1 x float> %src0)
   ret <1 x float>%res
@@ -407,54 +121,20 @@ declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>)
 define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB5_3
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    bnez a1, .LBB5_4
-; RV32-NEXT:  .LBB5_2: # %else2
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB5_3: # %cond.load
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    beqz a1, .LBB5_2
-; RV32-NEXT:  .LBB5_4: # %cond.load1
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v2f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB5_3
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    bnez a1, .LBB5_4
-; RV64-NEXT:  .LBB5_2: # %else2
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB5_3: # %cond.load
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    beqz a1, .LBB5_2
-; RV64-NEXT:  .LBB5_4: # %cond.load1
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <2 x float> @llvm.masked.expandload.v2f32(ptr align 4 %base, <2 x i1> %mask, <2 x float> %src0)
   ret <2 x float>%res
@@ -464,98 +144,20 @@ declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>)
 define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB6_5
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB6_6
-; RV32-NEXT:  .LBB6_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB6_7
-; RV32-NEXT:  .LBB6_3: # %else6
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    bnez a1, .LBB6_8
-; RV32-NEXT:  .LBB6_4: # %else10
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB6_5: # %cond.load
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB6_2
-; RV32-NEXT:  .LBB6_6: # %cond.load1
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB6_3
-; RV32-NEXT:  .LBB6_7: # %cond.load5
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    beqz a1, .LBB6_4
-; RV32-NEXT:  .LBB6_8: # %cond.load9
-; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v4f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB6_5
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB6_6
-; RV64-NEXT:  .LBB6_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB6_7
-; RV64-NEXT:  .LBB6_3: # %else6
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    bnez a1, .LBB6_8
-; RV64-NEXT:  .LBB6_4: # %else10
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB6_5: # %cond.load
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB6_2
-; RV64-NEXT:  .LBB6_6: # %cond.load1
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB6_3
-; RV64-NEXT:  .LBB6_7: # %cond.load5
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    beqz a1, .LBB6_4
-; RV64-NEXT:  .LBB6_8: # %cond.load9
-; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
-; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <4 x float> @llvm.masked.expandload.v4f32(ptr align 4 %base, <4 x i1> %mask, <4 x float> %src0)
   ret <4 x float>%res
@@ -565,186 +167,20 @@ declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>)
 define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB7_9
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB7_10
-; RV32-NEXT:  .LBB7_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB7_11
-; RV32-NEXT:  .LBB7_3: # %else6
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    bnez a2, .LBB7_12
-; RV32-NEXT:  .LBB7_4: # %else10
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    bnez a2, .LBB7_13
-; RV32-NEXT:  .LBB7_5: # %else14
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    bnez a2, .LBB7_14
-; RV32-NEXT:  .LBB7_6: # %else18
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    bnez a2, .LBB7_15
-; RV32-NEXT:  .LBB7_7: # %else22
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    bnez a1, .LBB7_16
-; RV32-NEXT:  .LBB7_8: # %else26
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB7_9: # %cond.load
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB7_2
-; RV32-NEXT:  .LBB7_10: # %cond.load1
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 1
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB7_3
-; RV32-NEXT:  .LBB7_11: # %cond.load5
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    beqz a2, .LBB7_4
-; RV32-NEXT:  .LBB7_12: # %cond.load9
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 3
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    beqz a2, .LBB7_5
-; RV32-NEXT:  .LBB7_13: # %cond.load13
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 4
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    beqz a2, .LBB7_6
-; RV32-NEXT:  .LBB7_14: # %cond.load17
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 5
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    beqz a2, .LBB7_7
-; RV32-NEXT:  .LBB7_15: # %cond.load21
-; RV32-NEXT:    flw fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 6
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    beqz a1, .LBB7_8
-; RV32-NEXT:  .LBB7_16: # %cond.load25
-; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 7
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vsll.vi v10, v10, 2, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v8f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB7_9
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB7_10
-; RV64-NEXT:  .LBB7_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB7_11
-; RV64-NEXT:  .LBB7_3: # %else6
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    bnez a2, .LBB7_12
-; RV64-NEXT:  .LBB7_4: # %else10
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    bnez a2, .LBB7_13
-; RV64-NEXT:  .LBB7_5: # %else14
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    bnez a2, .LBB7_14
-; RV64-NEXT:  .LBB7_6: # %else18
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    bnez a2, .LBB7_15
-; RV64-NEXT:  .LBB7_7: # %else22
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    bnez a1, .LBB7_16
-; RV64-NEXT:  .LBB7_8: # %else26
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB7_9: # %cond.load
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB7_2
-; RV64-NEXT:  .LBB7_10: # %cond.load1
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v10, 1
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB7_3
-; RV64-NEXT:  .LBB7_11: # %cond.load5
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    beqz a2, .LBB7_4
-; RV64-NEXT:  .LBB7_12: # %cond.load9
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 3
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    beqz a2, .LBB7_5
-; RV64-NEXT:  .LBB7_13: # %cond.load13
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 4
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    beqz a2, .LBB7_6
-; RV64-NEXT:  .LBB7_14: # %cond.load17
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 5
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    beqz a2, .LBB7_7
-; RV64-NEXT:  .LBB7_15: # %cond.load21
-; RV64-NEXT:    flw fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 6
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    beqz a1, .LBB7_8
-; RV64-NEXT:  .LBB7_16: # %cond.load25
-; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 7
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vsll.vi v10, v10, 2, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT:    vluxei32.v v8, (a0), v10, v0.t
 ; RV64-NEXT:    ret
   %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr align 4 %base, <8 x i1> %mask, <8 x float> %src0)
   ret <8 x float>%res
@@ -754,24 +190,20 @@ declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>)
 define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT:    vfirst.m a1, v0
-; RV32-NEXT:    bnez a1, .LBB8_2
-; RV32-NEXT:  # %bb.1: # %cond.load
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:  .LBB8_2: # %else
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v1f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT:    vfirst.m a1, v0
-; RV64-NEXT:    bnez a1, .LBB8_2
-; RV64-NEXT:  # %bb.1: # %cond.load
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:  .LBB8_2: # %else
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <1 x double> @llvm.masked.expandload.v1f64(ptr align 8 %base, <1 x i1> %mask, <1 x double> %src0)
   ret <1 x double>%res
@@ -781,54 +213,20 @@ declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>)
 define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB9_3
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    bnez a1, .LBB9_4
-; RV32-NEXT:  .LBB9_2: # %else2
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB9_3: # %cond.load
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    beqz a1, .LBB9_2
-; RV32-NEXT:  .LBB9_4: # %cond.load1
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT:    vfmv.s.f v9, fa5
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v2f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB9_3
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    bnez a1, .LBB9_4
-; RV64-NEXT:  .LBB9_2: # %else2
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB9_3: # %cond.load
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    beqz a1, .LBB9_2
-; RV64-NEXT:  .LBB9_4: # %cond.load1
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <2 x double> @llvm.masked.expandload.v2f64(ptr align 8 %base, <2 x i1> %mask, <2 x double> %src0)
   ret <2 x double>%res
@@ -838,98 +236,20 @@ declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>)
 define <4 x double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB10_5
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB10_6
-; RV32-NEXT:  .LBB10_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB10_7
-; RV32-NEXT:  .LBB10_3: # %else6
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    bnez a1, .LBB10_8
-; RV32-NEXT:  .LBB10_4: # %else10
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB10_5: # %cond.load
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB10_2
-; RV32-NEXT:  .LBB10_6: # %cond.load1
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 1
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB10_3
-; RV32-NEXT:  .LBB10_7: # %cond.load5
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 2
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    beqz a1, .LBB10_4
-; RV32-NEXT:  .LBB10_8: # %cond.load9
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vfmv.s.f v10, fa5
-; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vsll.vi v10, v10, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v4f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB10_5
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB10_6
-; RV64-NEXT:  .LBB10_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB10_7
-; RV64-NEXT:  .LBB10_3: # %else6
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    bnez a1, .LBB10_8
-; RV64-NEXT:  .LBB10_4: # %else10
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB10_5: # %cond.load
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB10_2
-; RV64-NEXT:  .LBB10_6: # %cond.load1
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v10, 1
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB10_3
-; RV64-NEXT:  .LBB10_7: # %cond.load5
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    beqz a1, .LBB10_4
-; RV64-NEXT:  .LBB10_8: # %cond.load9
-; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vfmv.s.f v10, fa5
-; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vsll.vi v10, v10, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v10, v0.t
 ; RV64-NEXT:    ret
   %res = call <4 x double> @llvm.masked.expandload.v4f64(ptr align 8 %base, <4 x i1> %mask, <4 x double> %src0)
   ret <4 x double>%res
@@ -939,186 +259,20 @@ declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>)
 define <8 x double> @expandload_v8f64(ptr %base, <8 x double> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB11_9
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB11_10
-; RV32-NEXT:  .LBB11_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB11_11
-; RV32-NEXT:  .LBB11_3: # %else6
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    bnez a2, .LBB11_12
-; RV32-NEXT:  .LBB11_4: # %else10
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    bnez a2, .LBB11_13
-; RV32-NEXT:  .LBB11_5: # %else14
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    bnez a2, .LBB11_14
-; RV32-NEXT:  .LBB11_6: # %else18
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    bnez a2, .LBB11_15
-; RV32-NEXT:  .LBB11_7: # %else22
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    bnez a1, .LBB11_16
-; RV32-NEXT:  .LBB11_8: # %else26
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB11_9: # %cond.load
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB11_2
-; RV32-NEXT:  .LBB11_10: # %cond.load1
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 1
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB11_3
-; RV32-NEXT:  .LBB11_11: # %cond.load5
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vslideup.vi v8, v12, 2
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    beqz a2, .LBB11_4
-; RV32-NEXT:  .LBB11_12: # %cond.load9
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vslideup.vi v8, v12, 3
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    beqz a2, .LBB11_5
-; RV32-NEXT:  .LBB11_13: # %cond.load13
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vslideup.vi v8, v12, 4
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    beqz a2, .LBB11_6
-; RV32-NEXT:  .LBB11_14: # %cond.load17
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vslideup.vi v8, v12, 5
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    beqz a2, .LBB11_7
-; RV32-NEXT:  .LBB11_15: # %cond.load21
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vslideup.vi v8, v12, 6
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    beqz a1, .LBB11_8
-; RV32-NEXT:  .LBB11_16: # %cond.load25
-; RV32-NEXT:    fld fa5, 0(a0)
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vfmv.s.f v12, fa5
-; RV32-NEXT:    vslideup.vi v8, v12, 7
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    viota.m v12, v0
+; RV32-NEXT:    vsll.vi v12, v12, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v8f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB11_9
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB11_10
-; RV64-NEXT:  .LBB11_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB11_11
-; RV64-NEXT:  .LBB11_3: # %else6
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    bnez a2, .LBB11_12
-; RV64-NEXT:  .LBB11_4: # %else10
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    bnez a2, .LBB11_13
-; RV64-NEXT:  .LBB11_5: # %else14
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    bnez a2, .LBB11_14
-; RV64-NEXT:  .LBB11_6: # %else18
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    bnez a2, .LBB11_15
-; RV64-NEXT:  .LBB11_7: # %else22
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    bnez a1, .LBB11_16
-; RV64-NEXT:  .LBB11_8: # %else26
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB11_9: # %cond.load
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB11_2
-; RV64-NEXT:  .LBB11_10: # %cond.load1
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v12, 1
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB11_3
-; RV64-NEXT:  .LBB11_11: # %cond.load5
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vslideup.vi v8, v12, 2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    beqz a2, .LBB11_4
-; RV64-NEXT:  .LBB11_12: # %cond.load9
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vslideup.vi v8, v12, 3
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    beqz a2, .LBB11_5
-; RV64-NEXT:  .LBB11_13: # %cond.load13
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vslideup.vi v8, v12, 4
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    beqz a2, .LBB11_6
-; RV64-NEXT:  .LBB11_14: # %cond.load17
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vslideup.vi v8, v12, 5
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    beqz a2, .LBB11_7
-; RV64-NEXT:  .LBB11_15: # %cond.load21
-; RV64-NEXT:    fld fa5, 0(a0)
-; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vslideup.vi v8, v12, 6
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    beqz a1, .LBB11_8
-; RV64-NEXT:  .LBB11_16: # %cond.load25
-; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vfmv.s.f v12, fa5
-; RV64-NEXT:    vslideup.vi v8, v12, 7
+; RV64-NEXT:    viota.m v12, v0
+; RV64-NEXT:    vsll.vi v12, v12, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT:    ret
   %res = call <8 x double> @llvm.masked.expandload.v8f64(ptr align 8 %base, <8 x i1> %mask, <8 x double> %src0)
   ret <8 x double>%res
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
index 5bf8b07efc1da..8299b5fa2cdde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -6,13 +6,9 @@ declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
 define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) {
 ; CHECK-LABEL: expandload_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfirst.m a1, v0
-; CHECK-NEXT:    bnez a1, .LBB0_2
-; CHECK-NEXT:  # %bb.1: # %cond.load
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:  .LBB0_2: # %else
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %src0)
   ret <1 x i8>%res
@@ -22,28 +18,9 @@ declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
 define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) {
 ; CHECK-LABEL: expandload_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB1_3
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a1, a1, 2
-; CHECK-NEXT:    bnez a1, .LBB1_4
-; CHECK-NEXT:  .LBB1_2: # %else2
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB1_3: # %cond.load
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a1, a1, 2
-; CHECK-NEXT:    beqz a1, .LBB1_2
-; CHECK-NEXT:  .LBB1_4: # %cond.load1
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %src0)
   ret <2 x i8>%res
@@ -53,50 +30,9 @@ declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
 define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) {
 ; CHECK-LABEL: expandload_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB2_5
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    bnez a2, .LBB2_6
-; CHECK-NEXT:  .LBB2_2: # %else2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    bnez a2, .LBB2_7
-; CHECK-NEXT:  .LBB2_3: # %else6
-; CHECK-NEXT:    andi a1, a1, 8
-; CHECK-NEXT:    bnez a1, .LBB2_8
-; CHECK-NEXT:  .LBB2_4: # %else10
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB2_5: # %cond.load
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    beqz a2, .LBB2_2
-; CHECK-NEXT:  .LBB2_6: # %cond.load1
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    beqz a2, .LBB2_3
-; CHECK-NEXT:  .LBB2_7: # %cond.load5
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a1, a1, 8
-; CHECK-NEXT:    beqz a1, .LBB2_4
-; CHECK-NEXT:  .LBB2_8: # %cond.load9
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %src0)
   ret <4 x i8>%res
@@ -106,94 +42,9 @@ declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
 define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) {
 ; CHECK-LABEL: expandload_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB3_9
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    bnez a2, .LBB3_10
-; CHECK-NEXT:  .LBB3_2: # %else2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    bnez a2, .LBB3_11
-; CHECK-NEXT:  .LBB3_3: # %else6
-; CHECK-NEXT:    andi a2, a1, 8
-; CHECK-NEXT:    bnez a2, .LBB3_12
-; CHECK-NEXT:  .LBB3_4: # %else10
-; CHECK-NEXT:    andi a2, a1, 16
-; CHECK-NEXT:    bnez a2, .LBB3_13
-; CHECK-NEXT:  .LBB3_5: # %else14
-; CHECK-NEXT:    andi a2, a1, 32
-; CHECK-NEXT:    bnez a2, .LBB3_14
-; CHECK-NEXT:  .LBB3_6: # %else18
-; CHECK-NEXT:    andi a2, a1, 64
-; CHECK-NEXT:    bnez a2, .LBB3_15
-; CHECK-NEXT:  .LBB3_7: # %else22
-; CHECK-NEXT:    andi a1, a1, -128
-; CHECK-NEXT:    bnez a1, .LBB3_16
-; CHECK-NEXT:  .LBB3_8: # %else26
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB3_9: # %cond.load
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    beqz a2, .LBB3_2
-; CHECK-NEXT:  .LBB3_10: # %cond.load1
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    beqz a2, .LBB3_3
-; CHECK-NEXT:  .LBB3_11: # %cond.load5
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 8
-; CHECK-NEXT:    beqz a2, .LBB3_4
-; CHECK-NEXT:  .LBB3_12: # %cond.load9
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 16
-; CHECK-NEXT:    beqz a2, .LBB3_5
-; CHECK-NEXT:  .LBB3_13: # %cond.load13
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf2, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 32
-; CHECK-NEXT:    beqz a2, .LBB3_6
-; CHECK-NEXT:  .LBB3_14: # %cond.load17
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a2, a1, 64
-; CHECK-NEXT:    beqz a2, .LBB3_7
-; CHECK-NEXT:  .LBB3_15: # %cond.load21
-; CHECK-NEXT:    lbu a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 6
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    andi a1, a1, -128
-; CHECK-NEXT:    beqz a1, .LBB3_8
-; CHECK-NEXT:  .LBB3_16: # %cond.load25
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 7
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %src0)
   ret <8 x i8>%res
@@ -203,13 +54,11 @@ declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
 define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) {
 ; CHECK-LABEL: expandload_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfirst.m a1, v0
-; CHECK-NEXT:    bnez a1, .LBB4_2
-; CHECK-NEXT:  # %bb.1: # %cond.load
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:  .LBB4_2: # %else
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %src0)
   ret <1 x i16>%res
@@ -219,28 +68,11 @@ declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
 define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) {
 ; CHECK-LABEL: expandload_v2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB5_3
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a1, a1, 2
-; CHECK-NEXT:    bnez a1, .LBB5_4
-; CHECK-NEXT:  .LBB5_2: # %else2
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB5_3: # %cond.load
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a1, a1, 2
-; CHECK-NEXT:    beqz a1, .LBB5_2
-; CHECK-NEXT:  .LBB5_4: # %cond.load1
-; CHECK-NEXT:    lh a0, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %src0)
   ret <2 x i16>%res
@@ -250,50 +82,11 @@ declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
 define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) {
 ; CHECK-LABEL: expandload_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB6_5
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    bnez a2, .LBB6_6
-; CHECK-NEXT:  .LBB6_2: # %else2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    bnez a2, .LBB6_7
-; CHECK-NEXT:  .LBB6_3: # %else6
-; CHECK-NEXT:    andi a1, a1, 8
-; CHECK-NEXT:    bnez a1, .LBB6_8
-; CHECK-NEXT:  .LBB6_4: # %else10
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB6_5: # %cond.load
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    beqz a2, .LBB6_2
-; CHECK-NEXT:  .LBB6_6: # %cond.load1
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    beqz a2, .LBB6_3
-; CHECK-NEXT:  .LBB6_7: # %cond.load5
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a1, a1, 8
-; CHECK-NEXT:    beqz a1, .LBB6_4
-; CHECK-NEXT:  .LBB6_8: # %cond.load9
-; CHECK-NEXT:    lh a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %src0)
   ret <4 x i16>%res
@@ -303,94 +96,11 @@ declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
 define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) {
 ; CHECK-LABEL: expandload_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB7_9
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    bnez a2, .LBB7_10
-; CHECK-NEXT:  .LBB7_2: # %else2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    bnez a2, .LBB7_11
-; CHECK-NEXT:  .LBB7_3: # %else6
-; CHECK-NEXT:    andi a2, a1, 8
-; CHECK-NEXT:    bnez a2, .LBB7_12
-; CHECK-NEXT:  .LBB7_4: # %else10
-; CHECK-NEXT:    andi a2, a1, 16
-; CHECK-NEXT:    bnez a2, .LBB7_13
-; CHECK-NEXT:  .LBB7_5: # %else14
-; CHECK-NEXT:    andi a2, a1, 32
-; CHECK-NEXT:    bnez a2, .LBB7_14
-; CHECK-NEXT:  .LBB7_6: # %else18
-; CHECK-NEXT:    andi a2, a1, 64
-; CHECK-NEXT:    bnez a2, .LBB7_15
-; CHECK-NEXT:  .LBB7_7: # %else22
-; CHECK-NEXT:    andi a1, a1, -128
-; CHECK-NEXT:    bnez a1, .LBB7_16
-; CHECK-NEXT:  .LBB7_8: # %else26
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB7_9: # %cond.load
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    beqz a2, .LBB7_2
-; CHECK-NEXT:  .LBB7_10: # %cond.load1
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    beqz a2, .LBB7_3
-; CHECK-NEXT:  .LBB7_11: # %cond.load5
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 8
-; CHECK-NEXT:    beqz a2, .LBB7_4
-; CHECK-NEXT:  .LBB7_12: # %cond.load9
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 16
-; CHECK-NEXT:    beqz a2, .LBB7_5
-; CHECK-NEXT:  .LBB7_13: # %cond.load13
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 32
-; CHECK-NEXT:    beqz a2, .LBB7_6
-; CHECK-NEXT:  .LBB7_14: # %cond.load17
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a2, a1, 64
-; CHECK-NEXT:    beqz a2, .LBB7_7
-; CHECK-NEXT:  .LBB7_15: # %cond.load21
-; CHECK-NEXT:    lh a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 6
-; CHECK-NEXT:    addi a0, a0, 2
-; CHECK-NEXT:    andi a1, a1, -128
-; CHECK-NEXT:    beqz a1, .LBB7_8
-; CHECK-NEXT:  .LBB7_16: # %cond.load25
-; CHECK-NEXT:    lh a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 7
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %src0)
   ret <8 x i16>%res
@@ -400,13 +110,11 @@ declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
 define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) {
 ; CHECK-LABEL: expandload_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfirst.m a1, v0
-; CHECK-NEXT:    bnez a1, .LBB8_2
-; CHECK-NEXT:  # %bb.1: # %cond.load
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:  .LBB8_2: # %else
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 2, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %src0)
   ret <1 x i32>%res
@@ -416,28 +124,11 @@ declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
 define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) {
 ; CHECK-LABEL: expandload_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB9_3
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a1, a1, 2
-; CHECK-NEXT:    bnez a1, .LBB9_4
-; CHECK-NEXT:  .LBB9_2: # %else2
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB9_3: # %cond.load
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a1, a1, 2
-; CHECK-NEXT:    beqz a1, .LBB9_2
-; CHECK-NEXT:  .LBB9_4: # %cond.load1
-; CHECK-NEXT:    lw a0, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 2, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %src0)
   ret <2 x i32>%res
@@ -447,50 +138,11 @@ declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
 define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) {
 ; CHECK-LABEL: expandload_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB10_5
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    bnez a2, .LBB10_6
-; CHECK-NEXT:  .LBB10_2: # %else2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    bnez a2, .LBB10_7
-; CHECK-NEXT:  .LBB10_3: # %else6
-; CHECK-NEXT:    andi a1, a1, 8
-; CHECK-NEXT:    bnez a1, .LBB10_8
-; CHECK-NEXT:  .LBB10_4: # %else10
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB10_5: # %cond.load
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    beqz a2, .LBB10_2
-; CHECK-NEXT:  .LBB10_6: # %cond.load1
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 1
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    beqz a2, .LBB10_3
-; CHECK-NEXT:  .LBB10_7: # %cond.load5
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v9, a2
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a1, a1, 8
-; CHECK-NEXT:    beqz a1, .LBB10_4
-; CHECK-NEXT:  .LBB10_8: # %cond.load9
-; CHECK-NEXT:    lw a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 2, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %src0)
   ret <4 x i32>%res
@@ -500,94 +152,11 @@ declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
 define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) {
 ; CHECK-LABEL: expandload_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v0
-; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    bnez a2, .LBB11_9
-; CHECK-NEXT:  # %bb.1: # %else
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    bnez a2, .LBB11_10
-; CHECK-NEXT:  .LBB11_2: # %else2
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    bnez a2, .LBB11_11
-; CHECK-NEXT:  .LBB11_3: # %else6
-; CHECK-NEXT:    andi a2, a1, 8
-; CHECK-NEXT:    bnez a2, .LBB11_12
-; CHECK-NEXT:  .LBB11_4: # %else10
-; CHECK-NEXT:    andi a2, a1, 16
-; CHECK-NEXT:    bnez a2, .LBB11_13
-; CHECK-NEXT:  .LBB11_5: # %else14
-; CHECK-NEXT:    andi a2, a1, 32
-; CHECK-NEXT:    bnez a2, .LBB11_14
-; CHECK-NEXT:  .LBB11_6: # %else18
-; CHECK-NEXT:    andi a2, a1, 64
-; CHECK-NEXT:    bnez a2, .LBB11_15
-; CHECK-NEXT:  .LBB11_7: # %else22
-; CHECK-NEXT:    andi a1, a1, -128
-; CHECK-NEXT:    bnez a1, .LBB11_16
-; CHECK-NEXT:  .LBB11_8: # %else26
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB11_9: # %cond.load
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
-; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 2
-; CHECK-NEXT:    beqz a2, .LBB11_2
-; CHECK-NEXT:  .LBB11_10: # %cond.load1
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v10, a2
-; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 4
-; CHECK-NEXT:    beqz a2, .LBB11_3
-; CHECK-NEXT:  .LBB11_11: # %cond.load5
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v10, a2
-; CHECK-NEXT:    vslideup.vi v8, v10, 2
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 8
-; CHECK-NEXT:    beqz a2, .LBB11_4
-; CHECK-NEXT:  .LBB11_12: # %cond.load9
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; CHECK-NEXT:    vmv.s.x v10, a2
-; CHECK-NEXT:    vslideup.vi v8, v10, 3
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 16
-; CHECK-NEXT:    beqz a2, .LBB11_5
-; CHECK-NEXT:  .LBB11_13: # %cond.load13
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
-; CHECK-NEXT:    vmv.s.x v10, a2
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 32
-; CHECK-NEXT:    beqz a2, .LBB11_6
-; CHECK-NEXT:  .LBB11_14: # %cond.load17
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT:    vmv.s.x v10, a2
-; CHECK-NEXT:    vslideup.vi v8, v10, 5
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a2, a1, 64
-; CHECK-NEXT:    beqz a2, .LBB11_7
-; CHECK-NEXT:  .LBB11_15: # %cond.load21
-; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT:    vmv.s.x v10, a2
-; CHECK-NEXT:    vslideup.vi v8, v10, 6
-; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    andi a1, a1, -128
-; CHECK-NEXT:    beqz a1, .LBB11_8
-; CHECK-NEXT:  .LBB11_16: # %cond.load25
-; CHECK-NEXT:    lw a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 7
+; CHECK-NEXT:    viota.m v10, v0
+; CHECK-NEXT:    vsll.vi v10, v10, 2, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
   %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %src0)
   ret <8 x i32>%res
@@ -597,32 +166,20 @@ declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
 define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT:    vfirst.m a1, v0
-; RV32-NEXT:    bnez a1, .LBB12_2
-; RV32-NEXT:  # %bb.1: # %cond.load
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lw a1, 4(a0)
-; RV32-NEXT:    lw a0, 0(a0)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:  .LBB12_2: # %else
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT:    vfirst.m a1, v0
-; RV64-NEXT:    bnez a1, .LBB12_2
-; RV64-NEXT:  # %bb.1: # %cond.load
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:  .LBB12_2: # %else
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %src0)
   ret <1 x i64>%res
@@ -632,58 +189,20 @@ declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
 define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB13_3
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    bnez a1, .LBB13_4
-; RV32-NEXT:  .LBB13_2: # %else2
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB13_3: # %cond.load
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    beqz a1, .LBB13_2
-; RV32-NEXT:  .LBB13_4: # %cond.load1
-; RV32-NEXT:    lw a1, 0(a0)
-; RV32-NEXT:    lw a0, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-NEXT:    vslide1down.vx v9, v9, a0
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    viota.m v9, v0
+; RV32-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB13_3
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    bnez a1, .LBB13_4
-; RV64-NEXT:  .LBB13_2: # %else2
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB13_3: # %cond.load
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT:    vmv.s.x v8, a2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    beqz a1, .LBB13_2
-; RV64-NEXT:  .LBB13_4: # %cond.load1
-; RV64-NEXT:    ld a0, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    viota.m v9, v0
+; RV64-NEXT:    vsll.vi v9, v9, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v9, v0.t
 ; RV64-NEXT:    ret
   %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %src0)
   ret <2 x i64>%res
@@ -693,108 +212,20 @@ declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
 define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB14_5
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB14_6
-; RV32-NEXT:  .LBB14_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB14_7
-; RV32-NEXT:  .LBB14_3: # %else6
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    bnez a1, .LBB14_8
-; RV32-NEXT:  .LBB14_4: # %else10
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB14_5: # %cond.load
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB14_2
-; RV32-NEXT:  .LBB14_6: # %cond.load1
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a2
-; RV32-NEXT:    vslide1down.vx v10, v10, a3
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 1
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB14_3
-; RV32-NEXT:  .LBB14_7: # %cond.load5
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a2
-; RV32-NEXT:    vslide1down.vx v10, v10, a3
-; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 2
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    beqz a1, .LBB14_4
-; RV32-NEXT:  .LBB14_8: # %cond.load9
-; RV32-NEXT:    lw a1, 0(a0)
-; RV32-NEXT:    lw a0, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a1
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    viota.m v10, v0
+; RV32-NEXT:    vsll.vi v10, v10, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v4i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB14_5
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB14_6
-; RV64-NEXT:  .LBB14_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB14_7
-; RV64-NEXT:  .LBB14_3: # %else6
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    bnez a1, .LBB14_8
-; RV64-NEXT:  .LBB14_4: # %else10
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB14_5: # %cond.load
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT:    vmv.s.x v8, a2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB14_2
-; RV64-NEXT:  .LBB14_6: # %cond.load1
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v10, 1
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB14_3
-; RV64-NEXT:  .LBB14_7: # %cond.load5
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    beqz a1, .LBB14_4
-; RV64-NEXT:  .LBB14_8: # %cond.load9
-; RV64-NEXT:    ld a0, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    viota.m v10, v0
+; RV64-NEXT:    vsll.vi v10, v10, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v10, v0.t
 ; RV64-NEXT:    ret
   %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %src0)
   ret <4 x i64>%res
@@ -804,208 +235,20 @@ declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
 define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB15_9
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB15_10
-; RV32-NEXT:  .LBB15_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB15_11
-; RV32-NEXT:  .LBB15_3: # %else6
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    bnez a2, .LBB15_12
-; RV32-NEXT:  .LBB15_4: # %else10
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    bnez a2, .LBB15_13
-; RV32-NEXT:  .LBB15_5: # %else14
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    bnez a2, .LBB15_14
-; RV32-NEXT:  .LBB15_6: # %else18
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    bnez a2, .LBB15_15
-; RV32-NEXT:  .LBB15_7: # %else22
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    bnez a1, .LBB15_16
-; RV32-NEXT:  .LBB15_8: # %else26
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB15_9: # %cond.load
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB15_2
-; RV32-NEXT:  .LBB15_10: # %cond.load1
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a2
-; RV32-NEXT:    vslide1down.vx v12, v12, a3
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 1
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB15_3
-; RV32-NEXT:  .LBB15_11: # %cond.load5
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a2
-; RV32-NEXT:    vslide1down.vx v12, v12, a3
-; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 2
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    beqz a2, .LBB15_4
-; RV32-NEXT:  .LBB15_12: # %cond.load9
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a2
-; RV32-NEXT:    vslide1down.vx v12, v12, a3
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 3
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    beqz a2, .LBB15_5
-; RV32-NEXT:  .LBB15_13: # %cond.load13
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a2
-; RV32-NEXT:    vslide1down.vx v12, v12, a3
-; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 4
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    beqz a2, .LBB15_6
-; RV32-NEXT:  .LBB15_14: # %cond.load17
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a2
-; RV32-NEXT:    vslide1down.vx v12, v12, a3
-; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 5
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    beqz a2, .LBB15_7
-; RV32-NEXT:  .LBB15_15: # %cond.load21
-; RV32-NEXT:    lw a2, 0(a0)
-; RV32-NEXT:    lw a3, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a2
-; RV32-NEXT:    vslide1down.vx v12, v12, a3
-; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 6
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    beqz a1, .LBB15_8
-; RV32-NEXT:  .LBB15_16: # %cond.load25
-; RV32-NEXT:    lw a1, 0(a0)
-; RV32-NEXT:    lw a0, 4(a0)
-; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a1
-; RV32-NEXT:    vslide1down.vx v12, v12, a0
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 7
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    viota.m v12, v0
+; RV32-NEXT:    vsll.vi v12, v12, 3, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB15_9
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB15_10
-; RV64-NEXT:  .LBB15_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB15_11
-; RV64-NEXT:  .LBB15_3: # %else6
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    bnez a2, .LBB15_12
-; RV64-NEXT:  .LBB15_4: # %else10
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    bnez a2, .LBB15_13
-; RV64-NEXT:  .LBB15_5: # %else14
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    bnez a2, .LBB15_14
-; RV64-NEXT:  .LBB15_6: # %else18
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    bnez a2, .LBB15_15
-; RV64-NEXT:  .LBB15_7: # %else22
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    bnez a1, .LBB15_16
-; RV64-NEXT:  .LBB15_8: # %else26
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB15_9: # %cond.load
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT:    vmv.s.x v8, a2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB15_2
-; RV64-NEXT:  .LBB15_10: # %cond.load1
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.s.x v12, a2
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v12, 1
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB15_3
-; RV64-NEXT:  .LBB15_11: # %cond.load5
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT:    vmv.s.x v12, a2
-; RV64-NEXT:    vslideup.vi v8, v12, 2
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    beqz a2, .LBB15_4
-; RV64-NEXT:  .LBB15_12: # %cond.load9
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
-; RV64-NEXT:    vmv.s.x v12, a2
-; RV64-NEXT:    vslideup.vi v8, v12, 3
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    beqz a2, .LBB15_5
-; RV64-NEXT:  .LBB15_13: # %cond.load13
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT:    vmv.s.x v12, a2
-; RV64-NEXT:    vslideup.vi v8, v12, 4
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    beqz a2, .LBB15_6
-; RV64-NEXT:  .LBB15_14: # %cond.load17
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT:    vmv.s.x v12, a2
-; RV64-NEXT:    vslideup.vi v8, v12, 5
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    beqz a2, .LBB15_7
-; RV64-NEXT:  .LBB15_15: # %cond.load21
-; RV64-NEXT:    ld a2, 0(a0)
-; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
-; RV64-NEXT:    vmv.s.x v12, a2
-; RV64-NEXT:    vslideup.vi v8, v12, 6
-; RV64-NEXT:    addi a0, a0, 8
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    beqz a1, .LBB15_8
-; RV64-NEXT:  .LBB15_16: # %cond.load25
-; RV64-NEXT:    ld a0, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vslideup.vi v8, v12, 7
+; RV64-NEXT:    viota.m v12, v0
+; RV64-NEXT:    vsll.vi v12, v12, 3, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v12, v0.t
 ; RV64-NEXT:    ret
   %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %src0)
   ret <8 x i64>%res

>From c769a6950af6fa54dd5588ca1fed1c0ebbf45f97 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 6 Aug 2024 15:12:55 +0800
Subject: [PATCH 2/2] Handle index>256 cases (except for the LMUL==8 case)

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 14 +++++++++-
 llvm/test/CodeGen/RISCV/rvv/expandload.ll   | 31 +++++++++++++++++++++
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f6a69e427252b..37b1d21330469 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -44,6 +44,7 @@
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
 #include <optional>
 
 using namespace llvm;
@@ -10768,9 +10769,20 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
     if (ContainerVT.isFloatingPoint())
       IndexVT = IndexVT.changeVectorElementTypeToInteger();
 
-    if (Subtarget.isRV32() && IndexVT.getVectorElementType().bitsGT(XLenVT))
+    MVT IndexEltVT = IndexVT.getVectorElementType();
+    if (Subtarget.isRV32() && IndexEltVT.bitsGT(XLenVT))
       IndexVT = IndexVT.changeVectorElementType(XLenVT);
 
+    // If the index vector is an i8 vector and the element count exceeds 256,
+    // widen its element type to i16 to avoid overflow.
+    if (IndexEltVT == MVT::i8 &&
+        VT.getVectorElementCount().getKnownMinValue() > 256) {
+      // FIXME: Don't know how to make the LMUL==8 case legal.
+      assert(getLMUL(IndexVT) != RISCVII::LMUL_8 &&
+             "We don't know how to lower the LMUL==8 case");
+      IndexVT = IndexVT.changeVectorElementType(MVT::i16);
+    }
+
     Index = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
                         DAG.getConstant(Intrinsic::riscv_viota, DL, XLenVT),
                         DAG.getUNDEF(IndexVT), Mask, VL);
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index ae51beb027285..83118a2f5c2e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -1547,3 +1547,34 @@ declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
 declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
 declare <16 x i64> @llvm.masked.expandload.v16i64(ptr, <16 x i1>, <16 x i64>)
 declare <32 x i64> @llvm.masked.expandload.v32i64(ptr, <32 x i1>, <32 x i64>)
+
+define <512 x i8> @test_expandload_v512i8(ptr %base, <512 x i1> %mask, <512 x i8> %passthru) "target-features"="+zvl1024b" {
+; RV64-LABEL: test_expandload_v512i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 512
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    viota.m v16, v0
+; RV64-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
+; RV64-NEXT:    vluxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_expandload_v512i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 512
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    viota.m v16, v0
+; RV32-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
+; RV32-NEXT:    vluxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT:    ret
+  %res = call <512 x i8> @llvm.masked.expandload.v512i8(ptr align 1 %base, <512 x i1> %mask, <512 x i8> %passthru)
+  ret <512 x i8> %res
+}
+
+; FIXME: Don't know how to make the LMUL==8 case (v1024i8) legal.
+; define <1024 x i8> @test_expandload_v1024i8(ptr %base, <1024 x i1> %mask, <1024 x i8> %passthru) "target-features"="+zvl1024b" {
+;   %res = call <1024 x i8> @llvm.masked.expandload.v1024i8(ptr align 1 %base, <1024 x i1> %mask, <1024 x i8> %passthru)
+;   ret <1024 x i8> %res
+; }
+
+declare <512 x i8> @llvm.masked.expandload.v512i8(ptr, <512 x i1>, <512 x i8>)
+declare <1024 x i8> @llvm.masked.expandload.v1024i8(ptr, <1024 x i1>, <1024 x i8>)
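
A note on why the second patch widens the indices: `viota.m` numbers the active elements from 0 upward and `vsll.vi` scales those numbers into byte offsets for the `vluxei` load, so an i8 index vector can only reach 256 elements. Below is a minimal, self-contained sketch of that sizing rule. It is illustrative only: the helper name `pickExpandLoadIndexBits` is made up for this note, and it generalizes the check, whereas the patch above keeps the index element width equal to the data element width and only widens i8 indices to i16 (still bailing on the LMUL==8 case).

```
#include <cstdint>

// Illustrative sketch, not the in-tree code: how many bits each index
// element needs for an expanding load of EC elements of EltSizeBytes bytes.
// viota.m produces offsets 0 .. EC-1, and the shift turns them into byte
// offsets of at most (EC - 1) * EltSizeBytes, which must fit in the index
// element type used by vluxei.
static unsigned pickExpandLoadIndexBits(uint64_t EC, uint64_t EltSizeBytes) {
  uint64_t MaxByteOffset = (EC - 1) * EltSizeBytes;
  unsigned Bits = 8;
  while (Bits < 64 && MaxByteOffset > ((uint64_t)1 << Bits) - 1)
    Bits *= 2;
  return Bits;
}

// Example: <512 x i8> gives a maximum byte offset of 511, so 8-bit indices
// are not enough and the lowering switches to e16 indices, matching the
// test_expandload_v512i8 CHECK lines above.
```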


