[llvm] 61c283d - [ScalarizeMaskedMemIntrin] Use pointer alignment from pointer of masked.compressstore/expandload. (#83519)

via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 3 17:41:20 PST 2024


Author: Yeting Kuo
Date: 2024-03-04T09:41:16+08:00
New Revision: 61c283db4be0c8ff1832afa754a9a6b45dcf2b06

URL: https://github.com/llvm/llvm-project/commit/61c283db4be0c8ff1832afa754a9a6b45dcf2b06
DIFF: https://github.com/llvm/llvm-project/commit/61c283db4be0c8ff1832afa754a9a6b45dcf2b06.diff

LOG: [ScalarizeMaskedMemIntrin] Use pointer alignment from pointer of masked.compressstore/expandload. (#83519)

Previously we used Align(1) for all scalarized loads/stores from
masked.compressstore/expandload. For targets that do not support
unaligned accesses, this forces the backend to split otherwise aligned
wide loads/stores into byte loads/stores. To fix this performance
issue, this patch preserves the alignment of the base pointer after
scalarizing.
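
For reference: each scalarized access lands at base + index * element-size,
so it can safely use the largest power of two that divides both the base
pointer's alignment and the element size in bytes. In the pass this is
computed with commonAlignment() on CI->getParamAlign(N).valueOrOne(). Below
is a minimal standalone C++ sketch of that arithmetic (not the LLVM helpers
themselves); the sample values are illustrative only.

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Largest power of two dividing both A and B (both non-zero); mirrors
  // llvm::MinAlign when A is already a power of two.
  static uint64_t largestCommonPow2(uint64_t A, uint64_t B) {
    uint64_t V = A | B;
    return V & (~V + 1); // isolate the lowest set bit
  }

  // Alignment usable for each scalarized element load/store.
  static uint64_t adjustedAlignment(uint64_t BaseAlign, uint64_t EltSizeBytes) {
    assert(BaseAlign != 0 && (BaseAlign & (BaseAlign - 1)) == 0 &&
           "alignment must be a power of two");
    return largestCommonPow2(BaseAlign, EltSizeBytes);
  }

  int main() {
    // ptr align 2, <8 x half>: each scalar half access may use align 2.
    printf("%llu\n", (unsigned long long)adjustedAlignment(2, 2));  // 2
    // ptr align 8, <4 x double>: each scalar double access may use align 8.
    printf("%llu\n", (unsigned long long)adjustedAlignment(8, 8));  // 8
    // ptr align 16, <2 x i32>: capped at the 4-byte element size.
    printf("%llu\n", (unsigned long long)adjustedAlignment(16, 4)); // 4
    return 0;
  }

When the call site carries no align attribute on the pointer (as in the i8
tests below), getParamAlign() is empty and valueOrOne() falls back to
Align(1), matching the previous behavior.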

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll

Modified: 
    llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index c01d03f6447240..f362dc5708b799 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -627,6 +627,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
   Value *Ptr = CI->getArgOperand(0);
   Value *Mask = CI->getArgOperand(1);
   Value *PassThru = CI->getArgOperand(2);
+  Align Alignment = CI->getParamAlign(0).valueOrOne();
 
   auto *VecType = cast<FixedVectorType>(CI->getType());
 
@@ -644,6 +645,10 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
   // The result vector
   Value *VResult = PassThru;
 
+  // Adjust alignment for the scalar instruction.
+  const Align AdjustedAlignment =
+      commonAlignment(Alignment, EltTy->getPrimitiveSizeInBits() / 8);
+
   // Shorten the way if the mask is a vector of constants.
   // Create a build_vector pattern, with loads/poisons as necessary and then
   // shuffle blend with the pass through value.
@@ -659,7 +664,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
       } else {
         Value *NewPtr =
             Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
+        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, AdjustedAlignment,
                                               "Load" + Twine(Idx));
         ShuffleMask[Idx] = Idx;
         ++MemIndex;
@@ -713,7 +718,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
     CondBlock->setName("cond.load");
 
     Builder.SetInsertPoint(CondBlock->getTerminator());
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, AdjustedAlignment);
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Move the pointer if there are more blocks to come.
@@ -755,6 +760,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
   Value *Src = CI->getArgOperand(0);
   Value *Ptr = CI->getArgOperand(1);
   Value *Mask = CI->getArgOperand(2);
+  Align Alignment = CI->getParamAlign(1).valueOrOne();
 
   auto *VecType = cast<FixedVectorType>(Src->getType());
 
@@ -767,6 +773,10 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
 
   Type *EltTy = VecType->getElementType();
 
+  // Adjust alignment for the scalar instruction.
+  const Align AdjustedAlignment =
+      commonAlignment(Alignment, EltTy->getPrimitiveSizeInBits() / 8);
+
   unsigned VectorWidth = VecType->getNumElements();
 
   // Shorten the way if the mask is a vector of constants.
@@ -778,7 +788,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
       Value *OneElt =
           Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
       Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-      Builder.CreateAlignedStore(OneElt, NewPtr, Align(1));
+      Builder.CreateAlignedStore(OneElt, NewPtr, AdjustedAlignment);
       ++MemIndex;
     }
     CI->eraseFromParent();
@@ -824,7 +834,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
 
     Builder.SetInsertPoint(CondBlock->getTerminator());
     Value *OneElt = Builder.CreateExtractElement(Src, Idx);
-    Builder.CreateAlignedStore(OneElt, Ptr, Align(1));
+    Builder.CreateAlignedStore(OneElt, Ptr, AdjustedAlignment);
 
     // Move the pointer if there are more blocks to come.
     Value *NewPtr;

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
new file mode 100644
index 00000000000000..52c52921e7e1d2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
@@ -0,0 +1,1079 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
+define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB0_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:  .LBB0_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB0_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:  .LBB0_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr align 2 %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
+define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB1_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB1_4
+; RV32-NEXT:  .LBB1_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB1_3: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB1_2
+; RV32-NEXT:  .LBB1_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB1_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB1_4
+; RV64-NEXT:  .LBB1_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB1_3: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB1_2
+; RV64-NEXT:  .LBB1_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr align 2 %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
+define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB2_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB2_6
+; RV32-NEXT:  .LBB2_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB2_7
+; RV32-NEXT:  .LBB2_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB2_8
+; RV32-NEXT:  .LBB2_4: # %else8
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB2_5: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB2_2
+; RV32-NEXT:  .LBB2_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB2_3
+; RV32-NEXT:  .LBB2_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 2
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB2_4
+; RV32-NEXT:  .LBB2_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB2_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB2_6
+; RV64-NEXT:  .LBB2_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB2_7
+; RV64-NEXT:  .LBB2_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB2_8
+; RV64-NEXT:  .LBB2_4: # %else8
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB2_5: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB2_2
+; RV64-NEXT:  .LBB2_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB2_3
+; RV64-NEXT:  .LBB2_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 2
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB2_4
+; RV64-NEXT:  .LBB2_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr align 2 %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
+define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB3_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB3_10
+; RV32-NEXT:  .LBB3_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB3_11
+; RV32-NEXT:  .LBB3_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB3_12
+; RV32-NEXT:  .LBB3_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB3_13
+; RV32-NEXT:  .LBB3_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB3_14
+; RV32-NEXT:  .LBB3_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB3_15
+; RV32-NEXT:  .LBB3_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB3_16
+; RV32-NEXT:  .LBB3_8: # %else20
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB3_9: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB3_2
+; RV32-NEXT:  .LBB3_10: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB3_3
+; RV32-NEXT:  .LBB3_11: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 2
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB3_4
+; RV32-NEXT:  .LBB3_12: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 3
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB3_5
+; RV32-NEXT:  .LBB3_13: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 4
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB3_6
+; RV32-NEXT:  .LBB3_14: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 5
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB3_7
+; RV32-NEXT:  .LBB3_15: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 6
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB3_8
+; RV32-NEXT:  .LBB3_16: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB3_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB3_10
+; RV64-NEXT:  .LBB3_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB3_11
+; RV64-NEXT:  .LBB3_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB3_12
+; RV64-NEXT:  .LBB3_4: # %else8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB3_13
+; RV64-NEXT:  .LBB3_5: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB3_14
+; RV64-NEXT:  .LBB3_6: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB3_15
+; RV64-NEXT:  .LBB3_7: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB3_16
+; RV64-NEXT:  .LBB3_8: # %else20
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB3_9: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB3_2
+; RV64-NEXT:  .LBB3_10: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB3_3
+; RV64-NEXT:  .LBB3_11: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 2
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB3_4
+; RV64-NEXT:  .LBB3_12: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 3
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB3_5
+; RV64-NEXT:  .LBB3_13: # %cond.store10
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 4
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB3_6
+; RV64-NEXT:  .LBB3_14: # %cond.store13
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 5
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB3_7
+; RV64-NEXT:  .LBB3_15: # %cond.store16
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 6
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB3_8
+; RV64-NEXT:  .LBB3_16: # %cond.store19
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 7
+; RV64-NEXT:    vse16.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr align 2 %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
+define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:  .LBB4_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB4_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:  .LBB4_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr align 4 %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
+define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB5_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB5_4
+; RV32-NEXT:  .LBB5_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB5_3: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB5_2
+; RV32-NEXT:  .LBB5_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB5_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB5_4
+; RV64-NEXT:  .LBB5_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB5_3: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB5_2
+; RV64-NEXT:  .LBB5_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr align 4 %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
+define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB6_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB6_6
+; RV32-NEXT:  .LBB6_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB6_7
+; RV32-NEXT:  .LBB6_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB6_8
+; RV32-NEXT:  .LBB6_4: # %else8
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB6_5: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB6_2
+; RV32-NEXT:  .LBB6_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vse32.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB6_3
+; RV32-NEXT:  .LBB6_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 2
+; RV32-NEXT:    vse32.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB6_4
+; RV32-NEXT:  .LBB6_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB6_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB6_6
+; RV64-NEXT:  .LBB6_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB6_7
+; RV64-NEXT:  .LBB6_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB6_8
+; RV64-NEXT:  .LBB6_4: # %else8
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB6_5: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB6_2
+; RV64-NEXT:  .LBB6_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    vse32.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB6_3
+; RV64-NEXT:  .LBB6_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 2
+; RV64-NEXT:    vse32.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB6_4
+; RV64-NEXT:  .LBB6_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr align 4 %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
+define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB7_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB7_10
+; RV32-NEXT:  .LBB7_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB7_11
+; RV32-NEXT:  .LBB7_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB7_12
+; RV32-NEXT:  .LBB7_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB7_13
+; RV32-NEXT:  .LBB7_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB7_14
+; RV32-NEXT:  .LBB7_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB7_15
+; RV32-NEXT:  .LBB7_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB7_16
+; RV32-NEXT:  .LBB7_8: # %else20
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB7_9: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB7_2
+; RV32-NEXT:  .LBB7_10: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    vse32.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB7_3
+; RV32-NEXT:  .LBB7_11: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vse32.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB7_4
+; RV32-NEXT:  .LBB7_12: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 3
+; RV32-NEXT:    vse32.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB7_5
+; RV32-NEXT:  .LBB7_13: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB7_6
+; RV32-NEXT:  .LBB7_14: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 5
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB7_7
+; RV32-NEXT:  .LBB7_15: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 6
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB7_8
+; RV32-NEXT:  .LBB7_16: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB7_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB7_10
+; RV64-NEXT:  .LBB7_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB7_11
+; RV64-NEXT:  .LBB7_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB7_12
+; RV64-NEXT:  .LBB7_4: # %else8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB7_13
+; RV64-NEXT:  .LBB7_5: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB7_14
+; RV64-NEXT:  .LBB7_6: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB7_15
+; RV64-NEXT:  .LBB7_7: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB7_16
+; RV64-NEXT:  .LBB7_8: # %else20
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB7_9: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB7_2
+; RV64-NEXT:  .LBB7_10: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    vse32.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB7_3
+; RV64-NEXT:  .LBB7_11: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vse32.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB7_4
+; RV64-NEXT:  .LBB7_12: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-NEXT:    vse32.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB7_5
+; RV64-NEXT:  .LBB7_13: # %cond.store10
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 4
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB7_6
+; RV64-NEXT:  .LBB7_14: # %cond.store13
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB7_7
+; RV64-NEXT:  .LBB7_15: # %cond.store16
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB7_8
+; RV64-NEXT:  .LBB7_16: # %cond.store19
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 7
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
+define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB8_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:  .LBB8_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB8_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:  .LBB8_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr align 8 %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
+define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB9_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB9_4
+; RV32-NEXT:  .LBB9_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB9_3: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB9_2
+; RV32-NEXT:  .LBB9_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB9_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB9_4
+; RV64-NEXT:  .LBB9_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB9_3: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB9_2
+; RV64-NEXT:  .LBB9_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr align 8 %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
+define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB10_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB10_6
+; RV32-NEXT:  .LBB10_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB10_7
+; RV32-NEXT:  .LBB10_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB10_8
+; RV32-NEXT:  .LBB10_4: # %else8
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB10_5: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB10_2
+; RV32-NEXT:  .LBB10_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    vse64.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB10_3
+; RV32-NEXT:  .LBB10_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB10_4
+; RV32-NEXT:  .LBB10_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB10_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB10_6
+; RV64-NEXT:  .LBB10_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB10_7
+; RV64-NEXT:  .LBB10_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB10_8
+; RV64-NEXT:  .LBB10_4: # %else8
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB10_5: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB10_2
+; RV64-NEXT:  .LBB10_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    vse64.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB10_3
+; RV64-NEXT:  .LBB10_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB10_4
+; RV64-NEXT:  .LBB10_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr align 8 %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
+define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB11_11
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB11_12
+; RV32-NEXT:  .LBB11_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB11_13
+; RV32-NEXT:  .LBB11_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:  .LBB11_4: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v12, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:  .LBB11_5: # %else8
+; RV32-NEXT:    addi sp, sp, -320
+; RV32-NEXT:    .cfi_def_cfa_offset 320
+; RV32-NEXT:    sw ra, 316(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 312(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 320
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB11_14
+; RV32-NEXT:  # %bb.6: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB11_15
+; RV32-NEXT:  .LBB11_7: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB11_16
+; RV32-NEXT:  .LBB11_8: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB11_10
+; RV32-NEXT:  .LBB11_9: # %cond.store19
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a1)
+; RV32-NEXT:    fld fa5, 56(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
+; RV32-NEXT:  .LBB11_10: # %else20
+; RV32-NEXT:    addi sp, s0, -320
+; RV32-NEXT:    lw ra, 316(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 312(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 320
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB11_11: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB11_2
+; RV32-NEXT:  .LBB11_12: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 1
+; RV32-NEXT:    vse64.v v12, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB11_3
+; RV32-NEXT:  .LBB11_13: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 2
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v12, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB11_4
+; RV32-NEXT:    j .LBB11_5
+; RV32-NEXT:  .LBB11_14: # %cond.store10
+; RV32-NEXT:    addi a2, sp, 192
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    fld fa5, 224(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB11_7
+; RV32-NEXT:  .LBB11_15: # %cond.store13
+; RV32-NEXT:    addi a2, sp, 128
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    fld fa5, 168(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB11_8
+; RV32-NEXT:  .LBB11_16: # %cond.store16
+; RV32-NEXT:    addi a2, sp, 64
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    fld fa5, 112(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB11_9
+; RV32-NEXT:    j .LBB11_10
+;
+; RV64-LABEL: compressstore_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB11_11
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB11_12
+; RV64-NEXT:  .LBB11_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB11_13
+; RV64-NEXT:  .LBB11_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB11_5
+; RV64-NEXT:  .LBB11_4: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:  .LBB11_5: # %else8
+; RV64-NEXT:    addi sp, sp, -320
+; RV64-NEXT:    .cfi_def_cfa_offset 320
+; RV64-NEXT:    sd ra, 312(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 304(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 320
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB11_14
+; RV64-NEXT:  # %bb.6: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB11_15
+; RV64-NEXT:  .LBB11_7: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB11_16
+; RV64-NEXT:  .LBB11_8: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB11_10
+; RV64-NEXT:  .LBB11_9: # %cond.store19
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a1)
+; RV64-NEXT:    fld fa5, 56(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
+; RV64-NEXT:  .LBB11_10: # %else20
+; RV64-NEXT:    addi sp, s0, -320
+; RV64-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 304(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 320
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB11_11: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB11_2
+; RV64-NEXT:  .LBB11_12: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-NEXT:    vse64.v v12, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB11_3
+; RV64-NEXT:  .LBB11_13: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB11_4
+; RV64-NEXT:    j .LBB11_5
+; RV64-NEXT:  .LBB11_14: # %cond.store10
+; RV64-NEXT:    addi a2, sp, 192
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    fld fa5, 224(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB11_7
+; RV64-NEXT:  .LBB11_15: # %cond.store13
+; RV64-NEXT:    addi a2, sp, 128
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    fld fa5, 168(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB11_8
+; RV64-NEXT:  .LBB11_16: # %cond.store16
+; RV64-NEXT:    addi a2, sp, 64
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    fld fa5, 112(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB11_9
+; RV64-NEXT:    j .LBB11_10
+  call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr align 8 %base, <8 x i1> %mask)
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
new file mode 100644
index 00000000000000..eb0096dbfba6de
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
@@ -0,0 +1,986 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
+define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) {
+; CHECK-LABEL: compressstore_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:  .LBB0_2: # %else
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i8(<1 x i8> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
+define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) {
+; CHECK-LABEL: compressstore_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB1_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB1_4
+; CHECK-NEXT:  .LBB1_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_3: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB1_2
+; CHECK-NEXT:  .LBB1_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i8(<2 x i8> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
+define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) {
+; CHECK-LABEL: compressstore_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB2_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB2_6
+; CHECK-NEXT:  .LBB2_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB2_7
+; CHECK-NEXT:  .LBB2_3: # %else5
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB2_8
+; CHECK-NEXT:  .LBB2_4: # %else8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_5: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB2_2
+; CHECK-NEXT:  .LBB2_6: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB2_3
+; CHECK-NEXT:  .LBB2_7: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB2_4
+; CHECK-NEXT:  .LBB2_8: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i8(<4 x i8> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
+define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) {
+; CHECK-LABEL: compressstore_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB3_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB3_10
+; CHECK-NEXT:  .LBB3_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB3_11
+; CHECK-NEXT:  .LBB3_3: # %else5
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB3_12
+; CHECK-NEXT:  .LBB3_4: # %else8
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB3_13
+; CHECK-NEXT:  .LBB3_5: # %else11
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB3_14
+; CHECK-NEXT:  .LBB3_6: # %else14
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB3_15
+; CHECK-NEXT:  .LBB3_7: # %else17
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB3_16
+; CHECK-NEXT:  .LBB3_8: # %else20
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_9: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB3_2
+; CHECK-NEXT:  .LBB3_10: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB3_3
+; CHECK-NEXT:  .LBB3_11: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB3_4
+; CHECK-NEXT:  .LBB3_12: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB3_5
+; CHECK-NEXT:  .LBB3_13: # %cond.store10
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 4
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB3_6
+; CHECK-NEXT:  .LBB3_14: # %cond.store13
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 5
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB3_7
+; CHECK-NEXT:  .LBB3_15: # %cond.store16
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB3_8
+; CHECK-NEXT:  .LBB3_16: # %cond.store19
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v8i8(<8 x i8> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
+define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) {
+; CHECK-LABEL: compressstore_v1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:  .LBB4_2: # %else
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr align 2 %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
+define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) {
+; CHECK-LABEL: compressstore_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB5_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB5_4
+; CHECK-NEXT:  .LBB5_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_3: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB5_2
+; CHECK-NEXT:  .LBB5_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr align 2 %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
+define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) {
+; CHECK-LABEL: compressstore_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB6_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB6_6
+; CHECK-NEXT:  .LBB6_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB6_7
+; CHECK-NEXT:  .LBB6_3: # %else5
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB6_8
+; CHECK-NEXT:  .LBB6_4: # %else8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_5: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB6_2
+; CHECK-NEXT:  .LBB6_6: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB6_3
+; CHECK-NEXT:  .LBB6_7: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB6_4
+; CHECK-NEXT:  .LBB6_8: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr align 2 %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
+define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) {
+; CHECK-LABEL: compressstore_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB7_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB7_10
+; CHECK-NEXT:  .LBB7_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB7_11
+; CHECK-NEXT:  .LBB7_3: # %else5
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB7_12
+; CHECK-NEXT:  .LBB7_4: # %else8
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB7_13
+; CHECK-NEXT:  .LBB7_5: # %else11
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB7_14
+; CHECK-NEXT:  .LBB7_6: # %else14
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB7_15
+; CHECK-NEXT:  .LBB7_7: # %else17
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB7_16
+; CHECK-NEXT:  .LBB7_8: # %else20
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_9: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB7_2
+; CHECK-NEXT:  .LBB7_10: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB7_3
+; CHECK-NEXT:  .LBB7_11: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB7_4
+; CHECK-NEXT:  .LBB7_12: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB7_5
+; CHECK-NEXT:  .LBB7_13: # %cond.store10
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 4
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB7_6
+; CHECK-NEXT:  .LBB7_14: # %cond.store13
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 5
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB7_7
+; CHECK-NEXT:  .LBB7_15: # %cond.store16
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vse16.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB7_8
+; CHECK-NEXT:  .LBB7_16: # %cond.store19
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr align 2 %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
+define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) {
+; CHECK-LABEL: compressstore_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:  .LBB8_2: # %else
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr align 4 %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
+define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) {
+; CHECK-LABEL: compressstore_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB9_4
+; CHECK-NEXT:  .LBB9_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_3: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB9_2
+; CHECK-NEXT:  .LBB9_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr align 4 %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
+define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) {
+; CHECK-LABEL: compressstore_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB10_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB10_6
+; CHECK-NEXT:  .LBB10_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB10_7
+; CHECK-NEXT:  .LBB10_3: # %else5
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB10_8
+; CHECK-NEXT:  .LBB10_4: # %else8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB10_5: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB10_2
+; CHECK-NEXT:  .LBB10_6: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB10_3
+; CHECK-NEXT:  .LBB10_7: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB10_4
+; CHECK-NEXT:  .LBB10_8: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr align 4 %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
+define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
+; CHECK-LABEL: compressstore_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB11_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB11_10
+; CHECK-NEXT:  .LBB11_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB11_11
+; CHECK-NEXT:  .LBB11_3: # %else5
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB11_12
+; CHECK-NEXT:  .LBB11_4: # %else8
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB11_13
+; CHECK-NEXT:  .LBB11_5: # %else11
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB11_14
+; CHECK-NEXT:  .LBB11_6: # %else14
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB11_15
+; CHECK-NEXT:  .LBB11_7: # %else17
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB11_16
+; CHECK-NEXT:  .LBB11_8: # %else20
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_9: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB11_2
+; CHECK-NEXT:  .LBB11_10: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB11_3
+; CHECK-NEXT:  .LBB11_11: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 2
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB11_4
+; CHECK-NEXT:  .LBB11_12: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 3
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB11_5
+; CHECK-NEXT:  .LBB11_13: # %cond.store10
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 4
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB11_6
+; CHECK-NEXT:  .LBB11_14: # %cond.store13
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 5
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB11_7
+; CHECK-NEXT:  .LBB11_15: # %cond.store16
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 6
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB11_8
+; CHECK-NEXT:  .LBB11_16: # %cond.store19
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr align 4 %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
+define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB12_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v9, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
+; RV32-NEXT:  .LBB12_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB12_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:  .LBB12_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr align 8 %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
+define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB13_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB13_4
+; RV32-NEXT:  .LBB13_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB13_3: # %cond.store
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v9, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v9
+; RV32-NEXT:    vmv.x.s a3, v8
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB13_2
+; RV32-NEXT:  .LBB13_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v9, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB13_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB13_4
+; RV64-NEXT:  .LBB13_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB13_3: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB13_2
+; RV64-NEXT:  .LBB13_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr align 8 %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
+define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB14_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB14_6
+; RV32-NEXT:  .LBB14_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB14_7
+; RV32-NEXT:  .LBB14_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB14_8
+; RV32-NEXT:  .LBB14_4: # %else8
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB14_5: # %cond.store
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vsrl.vx v10, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v10
+; RV32-NEXT:    vmv.x.s a3, v8
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB14_2
+; RV32-NEXT:  .LBB14_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v12, v10, a2
+; RV32-NEXT:    vmv.x.s a2, v12
+; RV32-NEXT:    vmv.x.s a3, v10
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB14_3
+; RV32-NEXT:  .LBB14_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v12, v10, a2
+; RV32-NEXT:    vmv.x.s a2, v12
+; RV32-NEXT:    vmv.x.s a3, v10
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB14_4
+; RV32-NEXT:  .LBB14_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v10, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v10
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB14_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB14_6
+; RV64-NEXT:  .LBB14_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB14_7
+; RV64-NEXT:  .LBB14_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB14_8
+; RV64-NEXT:  .LBB14_4: # %else8
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB14_5: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB14_2
+; RV64-NEXT:  .LBB14_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    vse64.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB14_3
+; RV64-NEXT:  .LBB14_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB14_4
+; RV64-NEXT:  .LBB14_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr align 8 %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
+define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB15_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB15_10
+; RV32-NEXT:  .LBB15_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB15_11
+; RV32-NEXT:  .LBB15_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB15_12
+; RV32-NEXT:  .LBB15_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB15_13
+; RV32-NEXT:  .LBB15_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB15_14
+; RV32-NEXT:  .LBB15_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB15_15
+; RV32-NEXT:  .LBB15_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB15_16
+; RV32-NEXT:  .LBB15_8: # %else20
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB15_9: # %cond.store
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vsrl.vx v12, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v12
+; RV32-NEXT:    vmv.x.s a3, v8
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB15_2
+; RV32-NEXT:  .LBB15_10: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 1
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB15_3
+; RV32-NEXT:  .LBB15_11: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 2
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB15_4
+; RV32-NEXT:  .LBB15_12: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB15_5
+; RV32-NEXT:  .LBB15_13: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 4
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB15_6
+; RV32-NEXT:  .LBB15_14: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 5
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB15_7
+; RV32-NEXT:  .LBB15_15: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 6
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB15_8
+; RV32-NEXT:  .LBB15_16: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v12, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v12
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB15_11
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB15_12
+; RV64-NEXT:  .LBB15_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB15_13
+; RV64-NEXT:  .LBB15_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB15_5
+; RV64-NEXT:  .LBB15_4: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:  .LBB15_5: # %else8
+; RV64-NEXT:    addi sp, sp, -320
+; RV64-NEXT:    .cfi_def_cfa_offset 320
+; RV64-NEXT:    sd ra, 312(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 304(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 320
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB15_14
+; RV64-NEXT:  # %bb.6: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB15_15
+; RV64-NEXT:  .LBB15_7: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB15_16
+; RV64-NEXT:  .LBB15_8: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB15_10
+; RV64-NEXT:  .LBB15_9: # %cond.store19
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a1)
+; RV64-NEXT:    ld a1, 56(sp)
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:  .LBB15_10: # %else20
+; RV64-NEXT:    addi sp, s0, -320
+; RV64-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 304(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 320
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB15_11: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB15_2
+; RV64-NEXT:  .LBB15_12: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-NEXT:    vse64.v v12, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB15_3
+; RV64-NEXT:  .LBB15_13: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB15_4
+; RV64-NEXT:    j .LBB15_5
+; RV64-NEXT:  .LBB15_14: # %cond.store10
+; RV64-NEXT:    addi a2, sp, 192
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 224(sp)
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB15_7
+; RV64-NEXT:  .LBB15_15: # %cond.store13
+; RV64-NEXT:    addi a2, sp, 128
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 168(sp)
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB15_8
+; RV64-NEXT:  .LBB15_16: # %cond.store16
+; RV64-NEXT:    addi a2, sp, 64
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 112(sp)
+; RV64-NEXT:    sd a2, 0(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB15_9
+; RV64-NEXT:    j .LBB15_10
+  call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr align 8 %base, <8 x i1> %mask)
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
new file mode 100644
index 00000000000000..48e820243c9578
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
@@ -0,0 +1,1107 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>)
+define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB0_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:  .LBB0_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB0_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:  .LBB0_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x half> @llvm.masked.expandload.v1f16(ptr align 2 %base, <1 x i1> %mask, <1 x half> %src0)
+  ret <1 x half>%res
+}
+
+declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>)
+define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB1_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB1_4
+; RV32-NEXT:  .LBB1_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB1_3: # %cond.load
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB1_2
+; RV32-NEXT:  .LBB1_4: # %cond.load1
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB1_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB1_4
+; RV64-NEXT:  .LBB1_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB1_3: # %cond.load
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB1_2
+; RV64-NEXT:  .LBB1_4: # %cond.load1
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x half> @llvm.masked.expandload.v2f16(ptr align 2 %base, <2 x i1> %mask, <2 x half> %src0)
+  ret <2 x half>%res
+}
+
+declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>)
+define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB2_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB2_6
+; RV32-NEXT:  .LBB2_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB2_7
+; RV32-NEXT:  .LBB2_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB2_8
+; RV32-NEXT:  .LBB2_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB2_5: # %cond.load
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB2_2
+; RV32-NEXT:  .LBB2_6: # %cond.load1
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB2_3
+; RV32-NEXT:  .LBB2_7: # %cond.load5
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB2_4
+; RV32-NEXT:  .LBB2_8: # %cond.load9
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB2_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB2_6
+; RV64-NEXT:  .LBB2_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB2_7
+; RV64-NEXT:  .LBB2_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB2_8
+; RV64-NEXT:  .LBB2_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB2_5: # %cond.load
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB2_2
+; RV64-NEXT:  .LBB2_6: # %cond.load1
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB2_3
+; RV64-NEXT:  .LBB2_7: # %cond.load5
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB2_4
+; RV64-NEXT:  .LBB2_8: # %cond.load9
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    ret
+  %res = call <4 x half> @llvm.masked.expandload.v4f16(ptr align 2 %base, <4 x i1> %mask, <4 x half> %src0)
+  ret <4 x half>%res
+}
+
+declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>)
+define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB3_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB3_10
+; RV32-NEXT:  .LBB3_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB3_11
+; RV32-NEXT:  .LBB3_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB3_12
+; RV32-NEXT:  .LBB3_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB3_13
+; RV32-NEXT:  .LBB3_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB3_14
+; RV32-NEXT:  .LBB3_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB3_15
+; RV32-NEXT:  .LBB3_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB3_16
+; RV32-NEXT:  .LBB3_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB3_9: # %cond.load
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB3_2
+; RV32-NEXT:  .LBB3_10: # %cond.load1
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB3_3
+; RV32-NEXT:  .LBB3_11: # %cond.load5
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB3_4
+; RV32-NEXT:  .LBB3_12: # %cond.load9
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB3_5
+; RV32-NEXT:  .LBB3_13: # %cond.load13
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 4
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB3_6
+; RV32-NEXT:  .LBB3_14: # %cond.load17
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB3_7
+; RV32-NEXT:  .LBB3_15: # %cond.load21
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 6
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB3_8
+; RV32-NEXT:  .LBB3_16: # %cond.load25
+; RV32-NEXT:    flh fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB3_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB3_10
+; RV64-NEXT:  .LBB3_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB3_11
+; RV64-NEXT:  .LBB3_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB3_12
+; RV64-NEXT:  .LBB3_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB3_13
+; RV64-NEXT:  .LBB3_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB3_14
+; RV64-NEXT:  .LBB3_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB3_15
+; RV64-NEXT:  .LBB3_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB3_16
+; RV64-NEXT:  .LBB3_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB3_9: # %cond.load
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB3_2
+; RV64-NEXT:  .LBB3_10: # %cond.load1
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB3_3
+; RV64-NEXT:  .LBB3_11: # %cond.load5
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB3_4
+; RV64-NEXT:  .LBB3_12: # %cond.load9
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB3_5
+; RV64-NEXT:  .LBB3_13: # %cond.load13
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 4
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB3_6
+; RV64-NEXT:  .LBB3_14: # %cond.load17
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB3_7
+; RV64-NEXT:  .LBB3_15: # %cond.load21
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 6
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB3_8
+; RV64-NEXT:  .LBB3_16: # %cond.load25
+; RV64-NEXT:    flh fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 7
+; RV64-NEXT:    ret
+  %res = call <8 x half> @llvm.masked.expandload.v8f16(ptr align 2 %base, <8 x i1> %mask, <8 x half> %src0)
+  ret <8 x half>%res
+}
+
+declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>)
+define <1 x float> @expandload_v1f32(ptr %base, <1 x float> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
+; RV32-NEXT:  .LBB4_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB4_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:  .LBB4_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x float> @llvm.masked.expandload.v1f32(ptr align 4 %base, <1 x i1> %mask, <1 x float> %src0)
+  ret <1 x float>%res
+}
+
+declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>)
+define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB5_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB5_4
+; RV32-NEXT:  .LBB5_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB5_3: # %cond.load
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB5_2
+; RV32-NEXT:  .LBB5_4: # %cond.load1
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB5_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB5_4
+; RV64-NEXT:  .LBB5_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB5_3: # %cond.load
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB5_2
+; RV64-NEXT:  .LBB5_4: # %cond.load1
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x float> @llvm.masked.expandload.v2f32(ptr align 4 %base, <2 x i1> %mask, <2 x float> %src0)
+  ret <2 x float>%res
+}
+
+declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>)
+define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB6_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB6_6
+; RV32-NEXT:  .LBB6_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB6_7
+; RV32-NEXT:  .LBB6_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB6_8
+; RV32-NEXT:  .LBB6_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB6_5: # %cond.load
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB6_2
+; RV32-NEXT:  .LBB6_6: # %cond.load1
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB6_3
+; RV32-NEXT:  .LBB6_7: # %cond.load5
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB6_4
+; RV32-NEXT:  .LBB6_8: # %cond.load9
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB6_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB6_6
+; RV64-NEXT:  .LBB6_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB6_7
+; RV64-NEXT:  .LBB6_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB6_8
+; RV64-NEXT:  .LBB6_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB6_5: # %cond.load
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB6_2
+; RV64-NEXT:  .LBB6_6: # %cond.load1
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB6_3
+; RV64-NEXT:  .LBB6_7: # %cond.load5
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB6_4
+; RV64-NEXT:  .LBB6_8: # %cond.load9
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    ret
+  %res = call <4 x float> @llvm.masked.expandload.v4f32(ptr align 4 %base, <4 x i1> %mask, <4 x float> %src0)
+  ret <4 x float>%res
+}
+
+declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>)
+define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB7_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB7_10
+; RV32-NEXT:  .LBB7_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB7_11
+; RV32-NEXT:  .LBB7_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB7_12
+; RV32-NEXT:  .LBB7_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB7_13
+; RV32-NEXT:  .LBB7_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB7_14
+; RV32-NEXT:  .LBB7_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB7_15
+; RV32-NEXT:  .LBB7_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB7_16
+; RV32-NEXT:  .LBB7_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB7_9: # %cond.load
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB7_2
+; RV32-NEXT:  .LBB7_10: # %cond.load1
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB7_3
+; RV32-NEXT:  .LBB7_11: # %cond.load5
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB7_4
+; RV32-NEXT:  .LBB7_12: # %cond.load9
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB7_5
+; RV32-NEXT:  .LBB7_13: # %cond.load13
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 4
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB7_6
+; RV32-NEXT:  .LBB7_14: # %cond.load17
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB7_7
+; RV32-NEXT:  .LBB7_15: # %cond.load21
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 6
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB7_8
+; RV32-NEXT:  .LBB7_16: # %cond.load25
+; RV32-NEXT:    flw fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB7_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB7_10
+; RV64-NEXT:  .LBB7_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB7_11
+; RV64-NEXT:  .LBB7_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB7_12
+; RV64-NEXT:  .LBB7_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB7_13
+; RV64-NEXT:  .LBB7_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB7_14
+; RV64-NEXT:  .LBB7_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB7_15
+; RV64-NEXT:  .LBB7_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB7_16
+; RV64-NEXT:  .LBB7_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB7_9: # %cond.load
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB7_2
+; RV64-NEXT:  .LBB7_10: # %cond.load1
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB7_3
+; RV64-NEXT:  .LBB7_11: # %cond.load5
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB7_4
+; RV64-NEXT:  .LBB7_12: # %cond.load9
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB7_5
+; RV64-NEXT:  .LBB7_13: # %cond.load13
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 4
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB7_6
+; RV64-NEXT:  .LBB7_14: # %cond.load17
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB7_7
+; RV64-NEXT:  .LBB7_15: # %cond.load21
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 6
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB7_8
+; RV64-NEXT:  .LBB7_16: # %cond.load25
+; RV64-NEXT:    flw fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 7
+; RV64-NEXT:    ret
+  %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr align 4 %base, <8 x i1> %mask, <8 x float> %src0)
+  ret <8 x float>%res
+}
+
+declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>)
+define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB8_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:  .LBB8_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB8_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:  .LBB8_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x double> @llvm.masked.expandload.v1f64(ptr align 8 %base, <1 x i1> %mask, <1 x double> %src0)
+  ret <1 x double>%res
+}
+
+declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>)
+define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB9_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB9_4
+; RV32-NEXT:  .LBB9_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB9_3: # %cond.load
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB9_2
+; RV32-NEXT:  .LBB9_4: # %cond.load1
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB9_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB9_4
+; RV64-NEXT:  .LBB9_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB9_3: # %cond.load
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB9_2
+; RV64-NEXT:  .LBB9_4: # %cond.load1
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x double> @llvm.masked.expandload.v2f64(ptr align 8 %base, <2 x i1> %mask, <2 x double> %src0)
+  ret <2 x double>%res
+}
+
+declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>)
+define <4 x double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB10_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB10_6
+; RV32-NEXT:  .LBB10_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB10_7
+; RV32-NEXT:  .LBB10_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB10_8
+; RV32-NEXT:  .LBB10_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB10_5: # %cond.load
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB10_2
+; RV32-NEXT:  .LBB10_6: # %cond.load1
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB10_3
+; RV32-NEXT:  .LBB10_7: # %cond.load5
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB10_4
+; RV32-NEXT:  .LBB10_8: # %cond.load9
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB10_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB10_6
+; RV64-NEXT:  .LBB10_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB10_7
+; RV64-NEXT:  .LBB10_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB10_8
+; RV64-NEXT:  .LBB10_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB10_5: # %cond.load
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB10_2
+; RV64-NEXT:  .LBB10_6: # %cond.load1
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB10_3
+; RV64-NEXT:  .LBB10_7: # %cond.load5
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB10_4
+; RV64-NEXT:  .LBB10_8: # %cond.load9
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    ret
+  %res = call <4 x double> @llvm.masked.expandload.v4f64(ptr align 8 %base, <4 x i1> %mask, <4 x double> %src0)
+  ret <4 x double>%res
+}
+
+declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>)
+define <8 x double> @expandload_v8f64(ptr %base, <8 x double> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB11_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB11_10
+; RV32-NEXT:  .LBB11_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB11_11
+; RV32-NEXT:  .LBB11_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB11_12
+; RV32-NEXT:  .LBB11_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB11_13
+; RV32-NEXT:  .LBB11_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB11_14
+; RV32-NEXT:  .LBB11_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB11_15
+; RV32-NEXT:  .LBB11_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB11_16
+; RV32-NEXT:  .LBB11_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB11_9: # %cond.load
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB11_2
+; RV32-NEXT:  .LBB11_10: # %cond.load1
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB11_3
+; RV32-NEXT:  .LBB11_11: # %cond.load5
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB11_4
+; RV32-NEXT:  .LBB11_12: # %cond.load9
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:  .LBB11_13: # %cond.load13
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 4
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB11_6
+; RV32-NEXT:  .LBB11_14: # %cond.load17
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB11_7
+; RV32-NEXT:  .LBB11_15: # %cond.load21
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 6
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB11_8
+; RV32-NEXT:  .LBB11_16: # %cond.load25
+; RV32-NEXT:    fld fa5, 0(a0)
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB11_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB11_10
+; RV64-NEXT:  .LBB11_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB11_11
+; RV64-NEXT:  .LBB11_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB11_12
+; RV64-NEXT:  .LBB11_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB11_13
+; RV64-NEXT:  .LBB11_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB11_14
+; RV64-NEXT:  .LBB11_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB11_15
+; RV64-NEXT:  .LBB11_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB11_16
+; RV64-NEXT:  .LBB11_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB11_9: # %cond.load
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB11_2
+; RV64-NEXT:  .LBB11_10: # %cond.load1
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB11_3
+; RV64-NEXT:  .LBB11_11: # %cond.load5
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB11_4
+; RV64-NEXT:  .LBB11_12: # %cond.load9
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 3
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB11_5
+; RV64-NEXT:  .LBB11_13: # %cond.load13
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 4
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB11_6
+; RV64-NEXT:  .LBB11_14: # %cond.load17
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB11_7
+; RV64-NEXT:  .LBB11_15: # %cond.load21
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 6
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB11_8
+; RV64-NEXT:  .LBB11_16: # %cond.load25
+; RV64-NEXT:    fld fa5, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 7
+; RV64-NEXT:    ret
+  %res = call <8 x double> @llvm.masked.expandload.v8f64(ptr align 8 %base, <8 x i1> %mask, <8 x double> %src0)
+  ret <8 x double>%res
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
new file mode 100644
index 00000000000000..d6aca55fbde59d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -0,0 +1,1000 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
+define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) {
+; CHECK-LABEL: expandload_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %cond.load
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:  .LBB0_2: # %else
+; CHECK-NEXT:    ret
+  %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %src0)
+  ret <1 x i8>%res
+}
+
+declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
+define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) {
+; CHECK-LABEL: expandload_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB1_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB1_4
+; CHECK-NEXT:  .LBB1_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_3: # %cond.load
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB1_2
+; CHECK-NEXT:  .LBB1_4: # %cond.load1
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    ret
+  %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %src0)
+  ret <2 x i8>%res
+}
+
+declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
+define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) {
+; CHECK-LABEL: expandload_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB2_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB2_6
+; CHECK-NEXT:  .LBB2_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB2_7
+; CHECK-NEXT:  .LBB2_3: # %else6
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB2_8
+; CHECK-NEXT:  .LBB2_4: # %else10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_5: # %cond.load
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB2_2
+; CHECK-NEXT:  .LBB2_6: # %cond.load1
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB2_3
+; CHECK-NEXT:  .LBB2_7: # %cond.load5
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB2_4
+; CHECK-NEXT:  .LBB2_8: # %cond.load9
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %src0)
+  ret <4 x i8>%res
+}
+
+declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
+define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) {
+; CHECK-LABEL: expandload_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB3_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB3_10
+; CHECK-NEXT:  .LBB3_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB3_11
+; CHECK-NEXT:  .LBB3_3: # %else6
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB3_12
+; CHECK-NEXT:  .LBB3_4: # %else10
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB3_13
+; CHECK-NEXT:  .LBB3_5: # %else14
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB3_14
+; CHECK-NEXT:  .LBB3_6: # %else18
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB3_15
+; CHECK-NEXT:  .LBB3_7: # %else22
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB3_16
+; CHECK-NEXT:  .LBB3_8: # %else26
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_9: # %cond.load
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB3_2
+; CHECK-NEXT:  .LBB3_10: # %cond.load1
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB3_3
+; CHECK-NEXT:  .LBB3_11: # %cond.load5
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB3_4
+; CHECK-NEXT:  .LBB3_12: # %cond.load9
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB3_5
+; CHECK-NEXT:  .LBB3_13: # %cond.load13
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 5, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB3_6
+; CHECK-NEXT:  .LBB3_14: # %cond.load17
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB3_7
+; CHECK-NEXT:  .LBB3_15: # %cond.load21
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 6
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB3_8
+; CHECK-NEXT:  .LBB3_16: # %cond.load25
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 7
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %src0)
+  ret <8 x i8>%res
+}
+
+declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
+define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) {
+; CHECK-LABEL: expandload_v1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %cond.load
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:  .LBB4_2: # %else
+; CHECK-NEXT:    ret
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %src0)
+  ret <1 x i16>%res
+}
+
+declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
+define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) {
+; CHECK-LABEL: expandload_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB5_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB5_4
+; CHECK-NEXT:  .LBB5_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_3: # %cond.load
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB5_2
+; CHECK-NEXT:  .LBB5_4: # %cond.load1
+; CHECK-NEXT:    lh a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %src0)
+  ret <2 x i16>%res
+}
+
+declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
+define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) {
+; CHECK-LABEL: expandload_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB6_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB6_6
+; CHECK-NEXT:  .LBB6_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB6_7
+; CHECK-NEXT:  .LBB6_3: # %else6
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB6_8
+; CHECK-NEXT:  .LBB6_4: # %else10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_5: # %cond.load
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB6_2
+; CHECK-NEXT:  .LBB6_6: # %cond.load1
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB6_3
+; CHECK-NEXT:  .LBB6_7: # %cond.load5
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB6_4
+; CHECK-NEXT:  .LBB6_8: # %cond.load9
+; CHECK-NEXT:    lh a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %src0)
+  ret <4 x i16>%res
+}
+
+declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
+define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) {
+; CHECK-LABEL: expandload_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB7_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB7_10
+; CHECK-NEXT:  .LBB7_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB7_11
+; CHECK-NEXT:  .LBB7_3: # %else6
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB7_12
+; CHECK-NEXT:  .LBB7_4: # %else10
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB7_13
+; CHECK-NEXT:  .LBB7_5: # %else14
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB7_14
+; CHECK-NEXT:  .LBB7_6: # %else18
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB7_15
+; CHECK-NEXT:  .LBB7_7: # %else22
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB7_16
+; CHECK-NEXT:  .LBB7_8: # %else26
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_9: # %cond.load
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB7_2
+; CHECK-NEXT:  .LBB7_10: # %cond.load1
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB7_3
+; CHECK-NEXT:  .LBB7_11: # %cond.load5
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB7_4
+; CHECK-NEXT:  .LBB7_12: # %cond.load9
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB7_5
+; CHECK-NEXT:  .LBB7_13: # %cond.load13
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB7_6
+; CHECK-NEXT:  .LBB7_14: # %cond.load17
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB7_7
+; CHECK-NEXT:  .LBB7_15: # %cond.load21
+; CHECK-NEXT:    lh a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 6
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB7_8
+; CHECK-NEXT:  .LBB7_16: # %cond.load25
+; CHECK-NEXT:    lh a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 7
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %src0)
+  ret <8 x i16>%res
+}
+
+declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
+define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) {
+; CHECK-LABEL: expandload_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1: # %cond.load
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:  .LBB8_2: # %else
+; CHECK-NEXT:    ret
+  %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %src0)
+  ret <1 x i32>%res
+}
+
+declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
+define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) {
+; CHECK-LABEL: expandload_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB9_4
+; CHECK-NEXT:  .LBB9_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_3: # %cond.load
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB9_2
+; CHECK-NEXT:  .LBB9_4: # %cond.load1
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    ret
+  %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %src0)
+  ret <2 x i32>%res
+}
+
+declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
+define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) {
+; CHECK-LABEL: expandload_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB10_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB10_6
+; CHECK-NEXT:  .LBB10_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB10_7
+; CHECK-NEXT:  .LBB10_3: # %else6
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB10_8
+; CHECK-NEXT:  .LBB10_4: # %else10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB10_5: # %cond.load
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB10_2
+; CHECK-NEXT:  .LBB10_6: # %cond.load1
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB10_3
+; CHECK-NEXT:  .LBB10_7: # %cond.load5
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB10_4
+; CHECK-NEXT:  .LBB10_8: # %cond.load9
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %src0)
+  ret <4 x i32>%res
+}
+
+declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
+define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) {
+; CHECK-LABEL: expandload_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB11_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB11_10
+; CHECK-NEXT:  .LBB11_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB11_11
+; CHECK-NEXT:  .LBB11_3: # %else6
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB11_12
+; CHECK-NEXT:  .LBB11_4: # %else10
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB11_13
+; CHECK-NEXT:  .LBB11_5: # %else14
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB11_14
+; CHECK-NEXT:  .LBB11_6: # %else18
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB11_15
+; CHECK-NEXT:  .LBB11_7: # %else22
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB11_16
+; CHECK-NEXT:  .LBB11_8: # %else26
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_9: # %cond.load
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB11_2
+; CHECK-NEXT:  .LBB11_10: # %cond.load1
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB11_3
+; CHECK-NEXT:  .LBB11_11: # %cond.load5
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB11_4
+; CHECK-NEXT:  .LBB11_12: # %cond.load9
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 3
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB11_5
+; CHECK-NEXT:  .LBB11_13: # %cond.load13
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB11_6
+; CHECK-NEXT:  .LBB11_14: # %cond.load17
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 5
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB11_7
+; CHECK-NEXT:  .LBB11_15: # %cond.load21
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 6
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB11_8
+; CHECK-NEXT:  .LBB11_16: # %cond.load25
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a0
+; CHECK-NEXT:    vslideup.vi v8, v10, 7
+; CHECK-NEXT:    ret
+  %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %src0)
+  ret <8 x i32>%res
+}
+
+declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
+define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB12_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lw a1, 4(a0)
+; RV32-NEXT:    lw a0, 0(a0)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:  .LBB12_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB12_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:  .LBB12_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %src0)
+  ret <1 x i64>%res
+}
+
+declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
+define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB13_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB13_4
+; RV32-NEXT:  .LBB13_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB13_3: # %cond.load
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB13_2
+; RV32-NEXT:  .LBB13_4: # %cond.load1
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    lw a0, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v9, v8, a1
+; RV32-NEXT:    vslide1down.vx v9, v9, a0
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB13_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB13_4
+; RV64-NEXT:  .LBB13_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB13_3: # %cond.load
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB13_2
+; RV64-NEXT:  .LBB13_4: # %cond.load1
+; RV64-NEXT:    ld a0, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %src0)
+  ret <2 x i64>%res
+}
+
+declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
+define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB14_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB14_6
+; RV32-NEXT:  .LBB14_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB14_7
+; RV32-NEXT:  .LBB14_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB14_8
+; RV32-NEXT:  .LBB14_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB14_5: # %cond.load
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB14_2
+; RV32-NEXT:  .LBB14_6: # %cond.load1
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v8, a2
+; RV32-NEXT:    vslide1down.vx v10, v10, a3
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB14_3
+; RV32-NEXT:  .LBB14_7: # %cond.load5
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v8, a2
+; RV32-NEXT:    vslide1down.vx v10, v10, a3
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB14_4
+; RV32-NEXT:  .LBB14_8: # %cond.load9
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    lw a0, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v8, a1
+; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB14_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB14_6
+; RV64-NEXT:  .LBB14_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB14_7
+; RV64-NEXT:  .LBB14_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB14_8
+; RV64-NEXT:  .LBB14_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB14_5: # %cond.load
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB14_2
+; RV64-NEXT:  .LBB14_6: # %cond.load1
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB14_3
+; RV64-NEXT:  .LBB14_7: # %cond.load5
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB14_4
+; RV64-NEXT:  .LBB14_8: # %cond.load9
+; RV64-NEXT:    ld a0, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    ret
+  %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %src0)
+  ret <4 x i64>%res
+}
+
+declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
+define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB15_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB15_10
+; RV32-NEXT:  .LBB15_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB15_11
+; RV32-NEXT:  .LBB15_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB15_12
+; RV32-NEXT:  .LBB15_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB15_13
+; RV32-NEXT:  .LBB15_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB15_14
+; RV32-NEXT:  .LBB15_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB15_15
+; RV32-NEXT:  .LBB15_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB15_16
+; RV32-NEXT:  .LBB15_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB15_9: # %cond.load
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB15_2
+; RV32-NEXT:  .LBB15_10: # %cond.load1
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB15_3
+; RV32-NEXT:  .LBB15_11: # %cond.load5
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB15_4
+; RV32-NEXT:  .LBB15_12: # %cond.load9
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB15_5
+; RV32-NEXT:  .LBB15_13: # %cond.load13
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
+; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 4
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB15_6
+; RV32-NEXT:  .LBB15_14: # %cond.load17
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
+; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB15_7
+; RV32-NEXT:  .LBB15_15: # %cond.load21
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
+; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 6
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB15_8
+; RV32-NEXT:  .LBB15_16: # %cond.load25
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    lw a0, 4(a0)
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a1
+; RV32-NEXT:    vslide1down.vx v12, v12, a0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB15_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB15_10
+; RV64-NEXT:  .LBB15_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB15_11
+; RV64-NEXT:  .LBB15_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB15_12
+; RV64-NEXT:  .LBB15_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB15_13
+; RV64-NEXT:  .LBB15_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB15_14
+; RV64-NEXT:  .LBB15_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB15_15
+; RV64-NEXT:  .LBB15_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB15_16
+; RV64-NEXT:  .LBB15_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB15_9: # %cond.load
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB15_2
+; RV64-NEXT:  .LBB15_10: # %cond.load1
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB15_3
+; RV64-NEXT:  .LBB15_11: # %cond.load5
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB15_4
+; RV64-NEXT:  .LBB15_12: # %cond.load9
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 3
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB15_5
+; RV64-NEXT:  .LBB15_13: # %cond.load13
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 4
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB15_6
+; RV64-NEXT:  .LBB15_14: # %cond.load17
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB15_7
+; RV64-NEXT:  .LBB15_15: # %cond.load21
+; RV64-NEXT:    ld a2, 0(a0)
+; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 6
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB15_8
+; RV64-NEXT:  .LBB15_16: # %cond.load25
+; RV64-NEXT:    ld a0, 0(a0)
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vmv.s.x v12, a0
+; RV64-NEXT:    vslideup.vi v8, v12, 7
+; RV64-NEXT:    ret
+  %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %src0)
+  ret <8 x i64>%res
+}

More information about the llvm-commits mailing list