[llvm] [ScalarizeMaskedMemIntrin] Use the pointer alignment from the pointer operand of masked.compressstore/expandload. (PR #83519)

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 29 23:20:36 PST 2024


https://github.com/yetingk updated https://github.com/llvm/llvm-project/pull/83519
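
The precommitted tests below all take the base pointer with an explicit "align" parameter attribute (e.g. "ptr align 2 %base"). Since llvm.masked.compressstore and llvm.masked.expandload have no separate alignment operand, that pointer alignment is the only alignment information available to ScalarizeMaskedMemIntrin. A minimal sketch of the IR pattern being exercised (mirroring the compressstore tests in this patch, not new functionality):

  declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)

  ; The per-element scalarized stores should be able to use the "align 2"
  ; known from %base instead of a conservative align 1 (the byte-wise sb
  ; stores visible in the precommit checks below reflect the conservative
  ; behavior before the functional change).
  define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mask) {
    call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr %base, <2 x i1> %mask)
    ret void
  }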

From dff4e1500703f92af3e78820659bc0ae121c7983 Mon Sep 17 00:00:00 2001
From: Yeting Kuo <yeting.kuo at sifive.com>
Date: Fri, 1 Mar 2024 10:08:58 +0800
Subject: [PATCH 1/3] Precommit tests

---
 .../rvv/fixed-vectors-compressstore-fp.ll     | 2016 +++++++++++++++
 .../rvv/fixed-vectors-compressstore-int.ll    | 1529 +++++++++++
 .../RISCV/rvv/fixed-vectors-expandload-fp.ll  | 2298 +++++++++++++++++
 .../RISCV/rvv/fixed-vectors-expandload-int.ll | 2078 +++++++++++++++
 4 files changed, 7921 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
new file mode 100644
index 00000000000000..f32352b30c5ec6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
@@ -0,0 +1,2016 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
+define void @compressstore_v1f16(ptr align 2 %base, <1 x half> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB0_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    addi a1, sp, 12
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vse16.v v8, (a1)
+; RV32-NEXT:    lh a1, 12(sp)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:  .LBB0_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB0_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    addi a1, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vse16.v v8, (a1)
+; RV64-NEXT:    lh a1, 8(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:  .LBB0_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
+define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB1_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB1_4
+; RV32-NEXT:  .LBB1_2: # %else2
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB1_3: # %cond.store
+; RV32-NEXT:    addi a2, sp, 12
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vse16.v v8, (a2)
+; RV32-NEXT:    lh a2, 12(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB1_2
+; RV32-NEXT:  .LBB1_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vse16.v v8, (a1)
+; RV32-NEXT:    lh a1, 8(sp)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB1_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB1_4
+; RV64-NEXT:  .LBB1_2: # %else2
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB1_3: # %cond.store
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vse16.v v8, (a2)
+; RV64-NEXT:    lh a2, 8(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB1_2
+; RV64-NEXT:  .LBB1_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vse16.v v8, (a1)
+; RV64-NEXT:    lh a1, 0(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
+define void @compressstore_v4f16(ptr align 2 %base, <4 x half> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB2_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB2_6
+; RV32-NEXT:  .LBB2_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB2_7
+; RV32-NEXT:  .LBB2_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB2_8
+; RV32-NEXT:  .LBB2_4: # %else8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB2_5: # %cond.store
+; RV32-NEXT:    addi a2, sp, 12
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vse16.v v8, (a2)
+; RV32-NEXT:    lh a2, 12(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB2_2
+; RV32-NEXT:  .LBB2_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 8(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB2_3
+; RV32-NEXT:  .LBB2_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 2
+; RV32-NEXT:    addi a2, sp, 4
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 4(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB2_4
+; RV32-NEXT:  .LBB2_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vse16.v v8, (a1)
+; RV32-NEXT:    lh a1, 0(sp)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    .cfi_def_cfa_offset 32
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB2_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB2_6
+; RV64-NEXT:  .LBB2_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB2_7
+; RV64-NEXT:  .LBB2_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB2_8
+; RV64-NEXT:  .LBB2_4: # %else8
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB2_5: # %cond.store
+; RV64-NEXT:    addi a2, sp, 24
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vse16.v v8, (a2)
+; RV64-NEXT:    lh a2, 24(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB2_2
+; RV64-NEXT:  .LBB2_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 16(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB2_3
+; RV64-NEXT:  .LBB2_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 2
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 8(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB2_4
+; RV64-NEXT:  .LBB2_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vse16.v v8, (a1)
+; RV64-NEXT:    lh a1, 0(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
+define void @compressstore_v8f16(ptr align 2 %base, <8 x half> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB3_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB3_10
+; RV32-NEXT:  .LBB3_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB3_11
+; RV32-NEXT:  .LBB3_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB3_12
+; RV32-NEXT:  .LBB3_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB3_13
+; RV32-NEXT:  .LBB3_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB3_14
+; RV32-NEXT:  .LBB3_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB3_15
+; RV32-NEXT:  .LBB3_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB3_16
+; RV32-NEXT:  .LBB3_8: # %else20
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB3_9: # %cond.store
+; RV32-NEXT:    addi a2, sp, 28
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vse16.v v8, (a2)
+; RV32-NEXT:    lh a2, 28(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB3_2
+; RV32-NEXT:  .LBB3_10: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    addi a2, sp, 24
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 24(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB3_3
+; RV32-NEXT:  .LBB3_11: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 2
+; RV32-NEXT:    addi a2, sp, 20
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 20(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB3_4
+; RV32-NEXT:  .LBB3_12: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 3
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 16(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB3_5
+; RV32-NEXT:  .LBB3_13: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 4
+; RV32-NEXT:    addi a2, sp, 12
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 12(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB3_6
+; RV32-NEXT:  .LBB3_14: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 5
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 8(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB3_7
+; RV32-NEXT:  .LBB3_15: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 6
+; RV32-NEXT:    addi a2, sp, 4
+; RV32-NEXT:    vse16.v v9, (a2)
+; RV32-NEXT:    lh a2, 4(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB3_8
+; RV32-NEXT:  .LBB3_16: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vse16.v v8, (a1)
+; RV32-NEXT:    lh a1, 0(sp)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    .cfi_def_cfa_offset 64
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB3_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB3_10
+; RV64-NEXT:  .LBB3_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB3_11
+; RV64-NEXT:  .LBB3_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB3_12
+; RV64-NEXT:  .LBB3_4: # %else8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB3_13
+; RV64-NEXT:  .LBB3_5: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB3_14
+; RV64-NEXT:  .LBB3_6: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB3_15
+; RV64-NEXT:  .LBB3_7: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB3_16
+; RV64-NEXT:  .LBB3_8: # %else20
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB3_9: # %cond.store
+; RV64-NEXT:    addi a2, sp, 56
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vse16.v v8, (a2)
+; RV64-NEXT:    lh a2, 56(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB3_2
+; RV64-NEXT:  .LBB3_10: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    addi a2, sp, 48
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 48(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB3_3
+; RV64-NEXT:  .LBB3_11: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 2
+; RV64-NEXT:    addi a2, sp, 40
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 40(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB3_4
+; RV64-NEXT:  .LBB3_12: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 3
+; RV64-NEXT:    addi a2, sp, 32
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 32(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB3_5
+; RV64-NEXT:  .LBB3_13: # %cond.store10
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 4
+; RV64-NEXT:    addi a2, sp, 24
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 24(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB3_6
+; RV64-NEXT:  .LBB3_14: # %cond.store13
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 5
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 16(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB3_7
+; RV64-NEXT:  .LBB3_15: # %cond.store16
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 6
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    vse16.v v9, (a2)
+; RV64-NEXT:    lh a2, 8(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB3_8
+; RV64-NEXT:  .LBB3_16: # %cond.store19
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 7
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vse16.v v8, (a1)
+; RV64-NEXT:    lh a1, 0(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
+define void @compressstore_v1f32(ptr align 4 %base, <1 x float> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a1, fa5
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:  .LBB4_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB4_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    addi a1, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vse32.v v8, (a1)
+; RV64-NEXT:    lw a1, 8(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:  .LBB4_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
+define void @compressstore_v2f32(ptr align 4 %base, <2 x float> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB5_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB5_4
+; RV32-NEXT:  .LBB5_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB5_3: # %cond.store
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB5_2
+; RV32-NEXT:  .LBB5_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a1, fa5
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB5_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB5_4
+; RV64-NEXT:  .LBB5_2: # %else2
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB5_3: # %cond.store
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vse32.v v8, (a2)
+; RV64-NEXT:    lw a2, 8(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB5_2
+; RV64-NEXT:  .LBB5_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vse32.v v8, (a1)
+; RV64-NEXT:    lw a1, 0(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
+define void @compressstore_v4f32(ptr align 4 %base, <4 x float> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB6_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB6_6
+; RV32-NEXT:  .LBB6_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB6_7
+; RV32-NEXT:  .LBB6_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB6_8
+; RV32-NEXT:  .LBB6_4: # %else8
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB6_5: # %cond.store
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB6_2
+; RV32-NEXT:  .LBB6_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB6_3
+; RV32-NEXT:  .LBB6_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 2
+; RV32-NEXT:    vfmv.f.s fa5, v9
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB6_4
+; RV32-NEXT:  .LBB6_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a1, fa5
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    .cfi_def_cfa_offset 32
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB6_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB6_6
+; RV64-NEXT:  .LBB6_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB6_7
+; RV64-NEXT:  .LBB6_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB6_8
+; RV64-NEXT:  .LBB6_4: # %else8
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB6_5: # %cond.store
+; RV64-NEXT:    addi a2, sp, 24
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v8, (a2)
+; RV64-NEXT:    lw a2, 24(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB6_2
+; RV64-NEXT:  .LBB6_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 1
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vse32.v v9, (a2)
+; RV64-NEXT:    lw a2, 16(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB6_3
+; RV64-NEXT:  .LBB6_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 2
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    vse32.v v9, (a2)
+; RV64-NEXT:    lw a2, 8(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB6_4
+; RV64-NEXT:  .LBB6_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vse32.v v8, (a1)
+; RV64-NEXT:    lw a1, 0(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
+define void @compressstore_v8f32(ptr align 4 %base, <8 x float> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB7_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB7_10
+; RV32-NEXT:  .LBB7_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB7_11
+; RV32-NEXT:  .LBB7_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB7_12
+; RV32-NEXT:  .LBB7_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB7_13
+; RV32-NEXT:  .LBB7_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB7_14
+; RV32-NEXT:  .LBB7_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB7_15
+; RV32-NEXT:  .LBB7_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB7_16
+; RV32-NEXT:  .LBB7_8: # %else20
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB7_9: # %cond.store
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB7_2
+; RV32-NEXT:  .LBB7_10: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB7_3
+; RV32-NEXT:  .LBB7_11: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB7_4
+; RV32-NEXT:  .LBB7_12: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 3
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB7_5
+; RV32-NEXT:  .LBB7_13: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 4
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB7_6
+; RV32-NEXT:  .LBB7_14: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 5
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB7_7
+; RV32-NEXT:  .LBB7_15: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 6
+; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fmv.x.w a2, fa5
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB7_8
+; RV32-NEXT:  .LBB7_16: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vfmv.f.s fa5, v8
+; RV32-NEXT:    fmv.x.w a1, fa5
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    .cfi_def_cfa_offset 64
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB7_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB7_10
+; RV64-NEXT:  .LBB7_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB7_11
+; RV64-NEXT:  .LBB7_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB7_12
+; RV64-NEXT:  .LBB7_4: # %else8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB7_13
+; RV64-NEXT:  .LBB7_5: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB7_14
+; RV64-NEXT:  .LBB7_6: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB7_15
+; RV64-NEXT:  .LBB7_7: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB7_16
+; RV64-NEXT:  .LBB7_8: # %else20
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB7_9: # %cond.store
+; RV64-NEXT:    addi a2, sp, 56
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v8, (a2)
+; RV64-NEXT:    lw a2, 56(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB7_2
+; RV64-NEXT:  .LBB7_10: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    addi a2, sp, 48
+; RV64-NEXT:    vse32.v v10, (a2)
+; RV64-NEXT:    lw a2, 48(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB7_3
+; RV64-NEXT:  .LBB7_11: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    addi a2, sp, 40
+; RV64-NEXT:    vse32.v v10, (a2)
+; RV64-NEXT:    lw a2, 40(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB7_4
+; RV64-NEXT:  .LBB7_12: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 3
+; RV64-NEXT:    addi a2, sp, 32
+; RV64-NEXT:    vse32.v v10, (a2)
+; RV64-NEXT:    lw a2, 32(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB7_5
+; RV64-NEXT:  .LBB7_13: # %cond.store10
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 4
+; RV64-NEXT:    addi a2, sp, 24
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v10, (a2)
+; RV64-NEXT:    lw a2, 24(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB7_6
+; RV64-NEXT:  .LBB7_14: # %cond.store13
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 5
+; RV64-NEXT:    addi a2, sp, 16
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v10, (a2)
+; RV64-NEXT:    lw a2, 16(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB7_7
+; RV64-NEXT:  .LBB7_15: # %cond.store16
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 6
+; RV64-NEXT:    addi a2, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v10, (a2)
+; RV64-NEXT:    lw a2, 8(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB7_8
+; RV64-NEXT:  .LBB7_16: # %cond.store19
+; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 7
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT:    vse32.v v8, (a1)
+; RV64-NEXT:    lw a1, 0(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
+define void @compressstore_v1f64(ptr align 8 %base, <1 x double> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB8_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a1)
+; RV32-NEXT:    lw a1, 12(sp)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    lw a2, 8(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a1, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    srli a1, a2, 24
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    srli a1, a2, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:  .LBB8_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB8_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fmv.x.d a1, fa5
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:  .LBB8_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
+define void @compressstore_v2f64(ptr align 8 %base, <2 x double> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB9_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB9_4
+; RV32-NEXT:  .LBB9_2: # %else2
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB9_3: # %cond.store
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    lw a2, 12(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 8(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB9_2
+; RV32-NEXT:  .LBB9_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vse64.v v8, (a1)
+; RV32-NEXT:    lw a1, 4(sp)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    lw a2, 0(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a1, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    srli a1, a2, 24
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    srli a1, a2, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB9_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB9_4
+; RV64-NEXT:  .LBB9_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB9_3: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB9_2
+; RV64-NEXT:  .LBB9_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fmv.x.d a1, fa5
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
+define void @compressstore_v4f64(ptr align 8 %base, <4 x double> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB10_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB10_6
+; RV32-NEXT:  .LBB10_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB10_7
+; RV32-NEXT:  .LBB10_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB10_8
+; RV32-NEXT:  .LBB10_4: # %else8
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB10_5: # %cond.store
+; RV32-NEXT:    addi a2, sp, 24
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    lw a2, 28(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 24(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB10_2
+; RV32-NEXT:  .LBB10_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vse64.v v10, (a2)
+; RV32-NEXT:    lw a2, 20(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 16(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB10_3
+; RV32-NEXT:  .LBB10_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v10, (a2)
+; RV32-NEXT:    lw a2, 12(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 8(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB10_4
+; RV32-NEXT:  .LBB10_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a1)
+; RV32-NEXT:    lw a1, 4(sp)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    lw a2, 0(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a1, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    srli a1, a2, 24
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    srli a1, a2, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB10_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB10_6
+; RV64-NEXT:  .LBB10_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB10_7
+; RV64-NEXT:  .LBB10_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB10_8
+; RV64-NEXT:  .LBB10_4: # %else8
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB10_5: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB10_2
+; RV64-NEXT:  .LBB10_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB10_3
+; RV64-NEXT:  .LBB10_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB10_4
+; RV64-NEXT:  .LBB10_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fmv.x.d a1, fa5
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
+define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -512
+; RV32-NEXT:    .cfi_def_cfa_offset 512
+; RV32-NEXT:    sw ra, 508(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 504(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 512
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB11_10
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB11_11
+; RV32-NEXT:  .LBB11_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB11_12
+; RV32-NEXT:  .LBB11_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB11_13
+; RV32-NEXT:  .LBB11_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB11_14
+; RV32-NEXT:  .LBB11_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB11_15
+; RV32-NEXT:  .LBB11_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB11_16
+; RV32-NEXT:  .LBB11_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB11_9
+; RV32-NEXT:  .LBB11_8: # %cond.store19
+; RV32-NEXT:    mv a1, sp
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a1)
+; RV32-NEXT:    fld fa5, 56(sp)
+; RV32-NEXT:    fsd fa5, 120(sp)
+; RV32-NEXT:    lw a1, 124(sp)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    lw a2, 120(sp)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    srli a3, a1, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    srli a1, a2, 24
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    srli a1, a2, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:  .LBB11_9: # %else20
+; RV32-NEXT:    addi sp, s0, -512
+; RV32-NEXT:    lw ra, 508(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 504(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 512
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB11_10: # %cond.store
+; RV32-NEXT:    addi a2, sp, 496
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    lw a2, 500(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 496(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB11_2
+; RV32-NEXT:  .LBB11_11: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 1
+; RV32-NEXT:    addi a2, sp, 488
+; RV32-NEXT:    vse64.v v12, (a2)
+; RV32-NEXT:    lw a2, 492(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 488(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB11_3
+; RV32-NEXT:  .LBB11_12: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 2
+; RV32-NEXT:    addi a2, sp, 480
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v12, (a2)
+; RV32-NEXT:    lw a2, 484(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 480(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB11_4
+; RV32-NEXT:  .LBB11_13: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    addi a2, sp, 472
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v12, (a2)
+; RV32-NEXT:    lw a2, 476(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 472(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:  .LBB11_14: # %cond.store10
+; RV32-NEXT:    addi a2, sp, 384
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    fld fa5, 416(sp)
+; RV32-NEXT:    fsd fa5, 464(sp)
+; RV32-NEXT:    lw a2, 468(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 464(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB11_6
+; RV32-NEXT:  .LBB11_15: # %cond.store13
+; RV32-NEXT:    addi a2, sp, 256
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    fld fa5, 296(sp)
+; RV32-NEXT:    fsd fa5, 376(sp)
+; RV32-NEXT:    lw a2, 380(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 376(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB11_7
+; RV32-NEXT:  .LBB11_16: # %cond.store16
+; RV32-NEXT:    addi a2, sp, 128
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vse64.v v8, (a2)
+; RV32-NEXT:    fld fa5, 176(sp)
+; RV32-NEXT:    fsd fa5, 248(sp)
+; RV32-NEXT:    lw a2, 252(sp)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    lw a3, 248(sp)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    sb a4, 7(a0)
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB11_8
+; RV32-NEXT:    j .LBB11_9
+;
+; RV64-LABEL: compressstore_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB11_11
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB11_12
+; RV64-NEXT:  .LBB11_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB11_13
+; RV64-NEXT:  .LBB11_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB11_5
+; RV64-NEXT:  .LBB11_4: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:  .LBB11_5: # %else8
+; RV64-NEXT:    addi sp, sp, -320
+; RV64-NEXT:    .cfi_def_cfa_offset 320
+; RV64-NEXT:    sd ra, 312(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 304(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 320
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB11_14
+; RV64-NEXT:  # %bb.6: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB11_15
+; RV64-NEXT:  .LBB11_7: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB11_16
+; RV64-NEXT:  .LBB11_8: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB11_10
+; RV64-NEXT:  .LBB11_9: # %cond.store19
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a1)
+; RV64-NEXT:    ld a1, 56(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:  .LBB11_10: # %else20
+; RV64-NEXT:    addi sp, s0, -320
+; RV64-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 304(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 320
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB11_11: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vfmv.f.s fa5, v8
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB11_2
+; RV64-NEXT:  .LBB11_12: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB11_3
+; RV64-NEXT:  .LBB11_13: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-NEXT:    vfmv.f.s fa5, v12
+; RV64-NEXT:    fmv.x.d a2, fa5
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB11_4
+; RV64-NEXT:    j .LBB11_5
+; RV64-NEXT:  .LBB11_14: # %cond.store10
+; RV64-NEXT:    addi a2, sp, 192
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 224(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB11_7
+; RV64-NEXT:  .LBB11_15: # %cond.store13
+; RV64-NEXT:    addi a2, sp, 128
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 168(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB11_8
+; RV64-NEXT:  .LBB11_16: # %cond.store16
+; RV64-NEXT:    addi a2, sp, 64
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 112(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB11_9
+; RV64-NEXT:    j .LBB11_10
+  call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
new file mode 100644
index 00000000000000..b0387023294cf4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
@@ -0,0 +1,1529 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
+define void @compressstore_v1i8(ptr align 2 %base, <1 x i8> %v, <1 x i1> %mask) {
+; CHECK-LABEL: compressstore_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:  .LBB0_2: # %else
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i8(<1 x i8> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
+define void @compressstore_v2i8(ptr align 2 %base, <2 x i8> %v, <2 x i1> %mask) {
+; CHECK-LABEL: compressstore_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB1_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB1_4
+; CHECK-NEXT:  .LBB1_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_3: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB1_2
+; CHECK-NEXT:  .LBB1_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i8(<2 x i8> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
+define void @compressstore_v4i8(ptr align 2 %base, <4 x i8> %v, <4 x i1> %mask) {
+; CHECK-LABEL: compressstore_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB2_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB2_6
+; CHECK-NEXT:  .LBB2_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB2_7
+; CHECK-NEXT:  .LBB2_3: # %else5
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB2_8
+; CHECK-NEXT:  .LBB2_4: # %else8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_5: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB2_2
+; CHECK-NEXT:  .LBB2_6: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB2_3
+; CHECK-NEXT:  .LBB2_7: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB2_4
+; CHECK-NEXT:  .LBB2_8: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i8(<4 x i8> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
+define void @compressstore_v8i8(ptr align 2 %base, <8 x i8> %v, <8 x i1> %mask) {
+; CHECK-LABEL: compressstore_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB3_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB3_10
+; CHECK-NEXT:  .LBB3_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB3_11
+; CHECK-NEXT:  .LBB3_3: # %else5
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB3_12
+; CHECK-NEXT:  .LBB3_4: # %else8
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB3_13
+; CHECK-NEXT:  .LBB3_5: # %else11
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB3_14
+; CHECK-NEXT:  .LBB3_6: # %else14
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB3_15
+; CHECK-NEXT:  .LBB3_7: # %else17
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB3_16
+; CHECK-NEXT:  .LBB3_8: # %else20
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_9: # %cond.store
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB3_2
+; CHECK-NEXT:  .LBB3_10: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB3_3
+; CHECK-NEXT:  .LBB3_11: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB3_4
+; CHECK-NEXT:  .LBB3_12: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB3_5
+; CHECK-NEXT:  .LBB3_13: # %cond.store10
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 4
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB3_6
+; CHECK-NEXT:  .LBB3_14: # %cond.store13
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 5
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB3_7
+; CHECK-NEXT:  .LBB3_15: # %cond.store16
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vse8.v v9, (a0)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB3_8
+; CHECK-NEXT:  .LBB3_16: # %cond.store19
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v8i8(<8 x i8> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
+define void @compressstore_v1i16(ptr align 2 %base, <1 x i16> %v, <1 x i1> %mask) {
+; CHECK-LABEL: compressstore_v1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:  .LBB4_2: # %else
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
+define void @compressstore_v2i16(ptr align 2 %base, <2 x i16> %v, <2 x i1> %mask) {
+; CHECK-LABEL: compressstore_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB5_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB5_4
+; CHECK-NEXT:  .LBB5_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_3: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB5_2
+; CHECK-NEXT:  .LBB5_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
+define void @compressstore_v4i16(ptr align 2 %base, <4 x i16> %v, <4 x i1> %mask) {
+; CHECK-LABEL: compressstore_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB6_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB6_6
+; CHECK-NEXT:  .LBB6_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB6_7
+; CHECK-NEXT:  .LBB6_3: # %else5
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB6_8
+; CHECK-NEXT:  .LBB6_4: # %else8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_5: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB6_2
+; CHECK-NEXT:  .LBB6_6: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB6_3
+; CHECK-NEXT:  .LBB6_7: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB6_4
+; CHECK-NEXT:  .LBB6_8: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
+define void @compressstore_v8i16(ptr align 2 %base, <8 x i16> %v, <8 x i1> %mask) {
+; CHECK-LABEL: compressstore_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB7_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB7_10
+; CHECK-NEXT:  .LBB7_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB7_11
+; CHECK-NEXT:  .LBB7_3: # %else5
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB7_12
+; CHECK-NEXT:  .LBB7_4: # %else8
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB7_13
+; CHECK-NEXT:  .LBB7_5: # %else11
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB7_14
+; CHECK-NEXT:  .LBB7_6: # %else14
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB7_15
+; CHECK-NEXT:  .LBB7_7: # %else17
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB7_16
+; CHECK-NEXT:  .LBB7_8: # %else20
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_9: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB7_2
+; CHECK-NEXT:  .LBB7_10: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB7_3
+; CHECK-NEXT:  .LBB7_11: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB7_4
+; CHECK-NEXT:  .LBB7_12: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 3
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB7_5
+; CHECK-NEXT:  .LBB7_13: # %cond.store10
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 4
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB7_6
+; CHECK-NEXT:  .LBB7_14: # %cond.store13
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 5
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB7_7
+; CHECK-NEXT:  .LBB7_15: # %cond.store16
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB7_8
+; CHECK-NEXT:  .LBB7_16: # %cond.store19
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
+define void @compressstore_v1i32(ptr align 4 %base, <1 x i32> %v, <1 x i1> %mask) {
+; CHECK-LABEL: compressstore_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a2, a1, 24
+; CHECK-NEXT:    sb a2, 3(a0)
+; CHECK-NEXT:    srli a2, a1, 16
+; CHECK-NEXT:    sb a2, 2(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:  .LBB8_2: # %else
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
+define void @compressstore_v2i32(ptr align 4 %base, <2 x i32> %v, <2 x i1> %mask) {
+; CHECK-LABEL: compressstore_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB9_4
+; CHECK-NEXT:  .LBB9_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_3: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB9_2
+; CHECK-NEXT:  .LBB9_4: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a2, a1, 24
+; CHECK-NEXT:    sb a2, 3(a0)
+; CHECK-NEXT:    srli a2, a1, 16
+; CHECK-NEXT:    sb a2, 2(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
+define void @compressstore_v4i32(ptr align 4 %base, <4 x i32> %v, <4 x i1> %mask) {
+; CHECK-LABEL: compressstore_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB10_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB10_6
+; CHECK-NEXT:  .LBB10_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB10_7
+; CHECK-NEXT:  .LBB10_3: # %else5
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB10_8
+; CHECK-NEXT:  .LBB10_4: # %else8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB10_5: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB10_2
+; CHECK-NEXT:  .LBB10_6: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB10_3
+; CHECK-NEXT:  .LBB10_7: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
+; CHECK-NEXT:    vmv.x.s a2, v9
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB10_4
+; CHECK-NEXT:  .LBB10_8: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a2, a1, 24
+; CHECK-NEXT:    sb a2, 3(a0)
+; CHECK-NEXT:    srli a2, a1, 16
+; CHECK-NEXT:    sb a2, 2(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
+define void @compressstore_v8i32(ptr align 4 %base, <8 x i32> %v, <8 x i1> %mask) {
+; CHECK-LABEL: compressstore_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB11_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB11_10
+; CHECK-NEXT:  .LBB11_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB11_11
+; CHECK-NEXT:  .LBB11_3: # %else5
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB11_12
+; CHECK-NEXT:  .LBB11_4: # %else8
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB11_13
+; CHECK-NEXT:  .LBB11_5: # %else11
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB11_14
+; CHECK-NEXT:  .LBB11_6: # %else14
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB11_15
+; CHECK-NEXT:  .LBB11_7: # %else17
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB11_16
+; CHECK-NEXT:  .LBB11_8: # %else20
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_9: # %cond.store
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.x.s a2, v8
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB11_2
+; CHECK-NEXT:  .LBB11_10: # %cond.store1
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
+; CHECK-NEXT:    vmv.x.s a2, v10
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB11_3
+; CHECK-NEXT:  .LBB11_11: # %cond.store4
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 2
+; CHECK-NEXT:    vmv.x.s a2, v10
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB11_4
+; CHECK-NEXT:  .LBB11_12: # %cond.store7
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 3
+; CHECK-NEXT:    vmv.x.s a2, v10
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB11_5
+; CHECK-NEXT:  .LBB11_13: # %cond.store10
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 4
+; CHECK-NEXT:    vmv.x.s a2, v10
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB11_6
+; CHECK-NEXT:  .LBB11_14: # %cond.store13
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 5
+; CHECK-NEXT:    vmv.x.s a2, v10
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB11_7
+; CHECK-NEXT:  .LBB11_15: # %cond.store16
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 6
+; CHECK-NEXT:    vmv.x.s a2, v10
+; CHECK-NEXT:    sb a2, 0(a0)
+; CHECK-NEXT:    srli a3, a2, 24
+; CHECK-NEXT:    sb a3, 3(a0)
+; CHECK-NEXT:    srli a3, a2, 16
+; CHECK-NEXT:    sb a3, 2(a0)
+; CHECK-NEXT:    srli a2, a2, 8
+; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB11_8
+; CHECK-NEXT:  .LBB11_16: # %cond.store19
+; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    srli a2, a1, 24
+; CHECK-NEXT:    sb a2, 3(a0)
+; CHECK-NEXT:    srli a2, a1, 16
+; CHECK-NEXT:    sb a2, 2(a0)
+; CHECK-NEXT:    srli a1, a1, 8
+; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    ret
+  call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
+define void @compressstore_v1i64(ptr align 8 %base, <1 x i64> %v, <1 x i1> %mask) {
+; RV32-LABEL: compressstore_v1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB12_2
+; RV32-NEXT:  # %bb.1: # %cond.store
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v9, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 7(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:  .LBB12_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB12_2
+; RV64-NEXT:  # %bb.1: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v8
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:  .LBB12_2: # %else
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr %base, <1 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
+define void @compressstore_v2i64(ptr align 8 %base, <2 x i64> %v, <2 x i1> %mask) {
+; RV32-LABEL: compressstore_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB13_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB13_4
+; RV32-NEXT:  .LBB13_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB13_3: # %cond.store
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v9, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v9
+; RV32-NEXT:    vmv.x.s a3, v8
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB13_2
+; RV32-NEXT:  .LBB13_4: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v9, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 7(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB13_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB13_4
+; RV64-NEXT:  .LBB13_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB13_3: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v8
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB13_2
+; RV64-NEXT:  .LBB13_4: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    vmv.x.s a1, v8
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr %base, <2 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
+define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask) {
+; RV32-LABEL: compressstore_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB14_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB14_6
+; RV32-NEXT:  .LBB14_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB14_7
+; RV32-NEXT:  .LBB14_3: # %else5
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB14_8
+; RV32-NEXT:  .LBB14_4: # %else8
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB14_5: # %cond.store
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vsrl.vx v10, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v10
+; RV32-NEXT:    vmv.x.s a3, v8
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB14_2
+; RV32-NEXT:  .LBB14_6: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v12, v10, a2
+; RV32-NEXT:    vmv.x.s a2, v12
+; RV32-NEXT:    vmv.x.s a3, v10
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB14_3
+; RV32-NEXT:  .LBB14_7: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v12, v10, a2
+; RV32-NEXT:    vmv.x.s a2, v12
+; RV32-NEXT:    vmv.x.s a3, v10
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB14_4
+; RV32-NEXT:  .LBB14_8: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v10, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v10
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 7(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB14_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB14_6
+; RV64-NEXT:  .LBB14_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB14_7
+; RV64-NEXT:  .LBB14_3: # %else5
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB14_8
+; RV64-NEXT:  .LBB14_4: # %else8
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB14_5: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v8
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB14_2
+; RV64-NEXT:  .LBB14_6: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 1
+; RV64-NEXT:    vmv.x.s a2, v10
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB14_3
+; RV64-NEXT:  .LBB14_7: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v8, 2
+; RV64-NEXT:    vmv.x.s a2, v10
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB14_4
+; RV64-NEXT:  .LBB14_8: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-NEXT:    vmv.x.s a1, v8
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+  call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr %base, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
+define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask) {
+; RV32-LABEL: compressstore_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB15_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB15_10
+; RV32-NEXT:  .LBB15_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB15_11
+; RV32-NEXT:  .LBB15_3: # %else5
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB15_12
+; RV32-NEXT:  .LBB15_4: # %else8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB15_13
+; RV32-NEXT:  .LBB15_5: # %else11
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB15_14
+; RV32-NEXT:  .LBB15_6: # %else14
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB15_15
+; RV32-NEXT:  .LBB15_7: # %else17
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB15_16
+; RV32-NEXT:  .LBB15_8: # %else20
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB15_9: # %cond.store
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vsrl.vx v12, v8, a2
+; RV32-NEXT:    vmv.x.s a2, v12
+; RV32-NEXT:    vmv.x.s a3, v8
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB15_2
+; RV32-NEXT:  .LBB15_10: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 1
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB15_3
+; RV32-NEXT:  .LBB15_11: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 2
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB15_4
+; RV32-NEXT:  .LBB15_12: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB15_5
+; RV32-NEXT:  .LBB15_13: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 4
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB15_6
+; RV32-NEXT:  .LBB15_14: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 5
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB15_7
+; RV32-NEXT:  .LBB15_15: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 6
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsrl.vx v16, v12, a2
+; RV32-NEXT:    vmv.x.s a2, v16
+; RV32-NEXT:    vmv.x.s a3, v12
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    srli a4, a3, 24
+; RV32-NEXT:    sb a4, 3(a0)
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB15_8
+; RV32-NEXT:  .LBB15_16: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsrl.vx v12, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v12
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 3(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 7(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: compressstore_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB15_11
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB15_12
+; RV64-NEXT:  .LBB15_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB15_13
+; RV64-NEXT:  .LBB15_3: # %else5
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB15_5
+; RV64-NEXT:  .LBB15_4: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 3
+; RV64-NEXT:    vmv.x.s a2, v12
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:  .LBB15_5: # %else8
+; RV64-NEXT:    addi sp, sp, -320
+; RV64-NEXT:    .cfi_def_cfa_offset 320
+; RV64-NEXT:    sd ra, 312(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 304(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 320
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB15_14
+; RV64-NEXT:  # %bb.6: # %else11
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB15_15
+; RV64-NEXT:  .LBB15_7: # %else14
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB15_16
+; RV64-NEXT:  .LBB15_8: # %else17
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB15_10
+; RV64-NEXT:  .LBB15_9: # %cond.store19
+; RV64-NEXT:    mv a1, sp
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a1)
+; RV64-NEXT:    ld a1, 56(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:  .LBB15_10: # %else20
+; RV64-NEXT:    addi sp, s0, -320
+; RV64-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 304(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 320
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB15_11: # %cond.store
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v8
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB15_2
+; RV64-NEXT:  .LBB15_12: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 1
+; RV64-NEXT:    vmv.x.s a2, v12
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB15_3
+; RV64-NEXT:  .LBB15_13: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v12, v8, 2
+; RV64-NEXT:    vmv.x.s a2, v12
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB15_4
+; RV64-NEXT:    j .LBB15_5
+; RV64-NEXT:  .LBB15_14: # %cond.store10
+; RV64-NEXT:    addi a2, sp, 192
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 224(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB15_7
+; RV64-NEXT:  .LBB15_15: # %cond.store13
+; RV64-NEXT:    addi a2, sp, 128
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 168(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB15_8
+; RV64-NEXT:  .LBB15_16: # %cond.store16
+; RV64-NEXT:    addi a2, sp, 64
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vse64.v v8, (a2)
+; RV64-NEXT:    ld a2, 112(sp)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 7(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 6(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 4(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 3(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 2(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB15_9
+; RV64-NEXT:    j .LBB15_10
+  call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr %base, <8 x i1> %mask)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
new file mode 100644
index 00000000000000..ddf027f97ec34c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
@@ -0,0 +1,2298 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>)
+define <1 x half> @expandload_v1f16(ptr align 2 %base, <1 x half> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB0_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a0, 0(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    sh a0, 12(sp)
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vle16.v v8, (a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:  .LBB0_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB0_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a0, 0(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    sh a0, 8(sp)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vle16.v v8, (a0)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:  .LBB0_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x half> @llvm.masked.expandload.v1f16(ptr %base, <1 x i1> %mask, <1 x half> %src0)
+  ret <1 x half>%res
+}
+
+declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>)
+define <2 x half> @expandload_v2f16(ptr align 2 %base, <2 x half> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB1_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB1_4
+; RV32-NEXT:  .LBB1_2: # %else2
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB1_3: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 12(sp)
+; RV32-NEXT:    flh fa5, 12(sp)
+; RV32-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB1_2
+; RV32-NEXT:  .LBB1_4: # %cond.load1
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a0, 0(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    sh a0, 8(sp)
+; RV32-NEXT:    flh fa5, 8(sp)
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB1_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB1_4
+; RV64-NEXT:  .LBB1_2: # %else2
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB1_3: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 8(sp)
+; RV64-NEXT:    flh fa5, 8(sp)
+; RV64-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB1_2
+; RV64-NEXT:  .LBB1_4: # %cond.load1
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a0, 0(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    sh a0, 0(sp)
+; RV64-NEXT:    flh fa5, 0(sp)
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %res = call <2 x half> @llvm.masked.expandload.v2f16(ptr %base, <2 x i1> %mask, <2 x half> %src0)
+  ret <2 x half>%res
+}
+
+declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>)
+define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB2_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB2_6
+; RV32-NEXT:  .LBB2_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB2_7
+; RV32-NEXT:  .LBB2_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB2_8
+; RV32-NEXT:  .LBB2_4: # %else10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB2_5: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 12(sp)
+; RV32-NEXT:    flh fa5, 12(sp)
+; RV32-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB2_2
+; RV32-NEXT:  .LBB2_6: # %cond.load1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 8(sp)
+; RV32-NEXT:    flh fa5, 8(sp)
+; RV32-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB2_3
+; RV32-NEXT:  .LBB2_7: # %cond.load5
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 4(sp)
+; RV32-NEXT:    flh fa5, 4(sp)
+; RV32-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB2_4
+; RV32-NEXT:  .LBB2_8: # %cond.load9
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a0, 0(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    sh a0, 0(sp)
+; RV32-NEXT:    flh fa5, 0(sp)
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    .cfi_def_cfa_offset 32
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB2_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB2_6
+; RV64-NEXT:  .LBB2_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB2_7
+; RV64-NEXT:  .LBB2_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB2_8
+; RV64-NEXT:  .LBB2_4: # %else10
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB2_5: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 24(sp)
+; RV64-NEXT:    flh fa5, 24(sp)
+; RV64-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB2_2
+; RV64-NEXT:  .LBB2_6: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 16(sp)
+; RV64-NEXT:    flh fa5, 16(sp)
+; RV64-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB2_3
+; RV64-NEXT:  .LBB2_7: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 8(sp)
+; RV64-NEXT:    flh fa5, 8(sp)
+; RV64-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB2_4
+; RV64-NEXT:  .LBB2_8: # %cond.load9
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a0, 0(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    sh a0, 0(sp)
+; RV64-NEXT:    flh fa5, 0(sp)
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+  %res = call <4 x half> @llvm.masked.expandload.v4f16(ptr %base, <4 x i1> %mask, <4 x half> %src0)
+  ret <4 x half>%res
+}
+
+declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>)
+define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB3_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB3_10
+; RV32-NEXT:  .LBB3_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB3_11
+; RV32-NEXT:  .LBB3_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB3_12
+; RV32-NEXT:  .LBB3_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB3_13
+; RV32-NEXT:  .LBB3_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB3_14
+; RV32-NEXT:  .LBB3_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB3_15
+; RV32-NEXT:  .LBB3_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB3_16
+; RV32-NEXT:  .LBB3_8: # %else26
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB3_9: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 28(sp)
+; RV32-NEXT:    flh fa5, 28(sp)
+; RV32-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB3_2
+; RV32-NEXT:  .LBB3_10: # %cond.load1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 24(sp)
+; RV32-NEXT:    flh fa5, 24(sp)
+; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB3_3
+; RV32-NEXT:  .LBB3_11: # %cond.load5
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 20(sp)
+; RV32-NEXT:    flh fa5, 20(sp)
+; RV32-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB3_4
+; RV32-NEXT:  .LBB3_12: # %cond.load9
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 16(sp)
+; RV32-NEXT:    flh fa5, 16(sp)
+; RV32-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB3_5
+; RV32-NEXT:  .LBB3_13: # %cond.load13
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 12(sp)
+; RV32-NEXT:    flh fa5, 12(sp)
+; RV32-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 4
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB3_6
+; RV32-NEXT:  .LBB3_14: # %cond.load17
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 8(sp)
+; RV32-NEXT:    flh fa5, 8(sp)
+; RV32-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 5
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB3_7
+; RV32-NEXT:  .LBB3_15: # %cond.load21
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    sh a2, 4(sp)
+; RV32-NEXT:    flh fa5, 4(sp)
+; RV32-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 6
+; RV32-NEXT:    addi a0, a0, 2
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB3_8
+; RV32-NEXT:  .LBB3_16: # %cond.load25
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a0, 0(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    sh a0, 0(sp)
+; RV32-NEXT:    flh fa5, 0(sp)
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 7
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    .cfi_def_cfa_offset 64
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB3_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB3_10
+; RV64-NEXT:  .LBB3_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB3_11
+; RV64-NEXT:  .LBB3_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB3_12
+; RV64-NEXT:  .LBB3_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB3_13
+; RV64-NEXT:  .LBB3_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB3_14
+; RV64-NEXT:  .LBB3_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB3_15
+; RV64-NEXT:  .LBB3_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB3_16
+; RV64-NEXT:  .LBB3_8: # %else26
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB3_9: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 56(sp)
+; RV64-NEXT:    flh fa5, 56(sp)
+; RV64-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB3_2
+; RV64-NEXT:  .LBB3_10: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 48(sp)
+; RV64-NEXT:    flh fa5, 48(sp)
+; RV64-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB3_3
+; RV64-NEXT:  .LBB3_11: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 40(sp)
+; RV64-NEXT:    flh fa5, 40(sp)
+; RV64-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB3_4
+; RV64-NEXT:  .LBB3_12: # %cond.load9
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 32(sp)
+; RV64-NEXT:    flh fa5, 32(sp)
+; RV64-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB3_5
+; RV64-NEXT:  .LBB3_13: # %cond.load13
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 24(sp)
+; RV64-NEXT:    flh fa5, 24(sp)
+; RV64-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 4
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB3_6
+; RV64-NEXT:  .LBB3_14: # %cond.load17
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 16(sp)
+; RV64-NEXT:    flh fa5, 16(sp)
+; RV64-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 5
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB3_7
+; RV64-NEXT:  .LBB3_15: # %cond.load21
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    sh a2, 8(sp)
+; RV64-NEXT:    flh fa5, 8(sp)
+; RV64-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 6
+; RV64-NEXT:    addi a0, a0, 2
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB3_8
+; RV64-NEXT:  .LBB3_16: # %cond.load25
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a0, 0(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    sh a0, 0(sp)
+; RV64-NEXT:    flh fa5, 0(sp)
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 7
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  %res = call <8 x half> @llvm.masked.expandload.v8f16(ptr %base, <8 x i1> %mask, <8 x half> %src0)
+  ret <8 x half>%res
+}
+
+declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>)
+define <1 x float> @expandload_v1f32(ptr align 4 %base, <1 x float> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:  .LBB4_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB4_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    sw a0, 8(sp)
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vle32.v v8, (a0)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:  .LBB4_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x float> @llvm.masked.expandload.v1f32(ptr %base, <1 x i1> %mask, <1 x float> %src0)
+  ret <1 x float>%res
+}
+
+declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>)
+define <2 x float> @expandload_v2f32(ptr align 4 %base, <2 x float> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB5_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB5_4
+; RV32-NEXT:  .LBB5_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB5_3: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB5_2
+; RV32-NEXT:  .LBB5_4: # %cond.load1
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB5_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB5_4
+; RV64-NEXT:  .LBB5_2: # %else2
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB5_3: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 8(sp)
+; RV64-NEXT:    flw fa5, 8(sp)
+; RV64-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB5_2
+; RV64-NEXT:  .LBB5_4: # %cond.load1
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    sw a0, 0(sp)
+; RV64-NEXT:    flw fa5, 0(sp)
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %res = call <2 x float> @llvm.masked.expandload.v2f32(ptr %base, <2 x i1> %mask, <2 x float> %src0)
+  ret <2 x float>%res
+}
+
+declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>)
+define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB6_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB6_6
+; RV32-NEXT:  .LBB6_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB6_7
+; RV32-NEXT:  .LBB6_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB6_8
+; RV32-NEXT:  .LBB6_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB6_5: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB6_2
+; RV32-NEXT:  .LBB6_6: # %cond.load1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB6_3
+; RV32-NEXT:  .LBB6_7: # %cond.load5
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB6_4
+; RV32-NEXT:  .LBB6_8: # %cond.load9
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    .cfi_def_cfa_offset 32
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB6_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB6_6
+; RV64-NEXT:  .LBB6_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB6_7
+; RV64-NEXT:  .LBB6_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB6_8
+; RV64-NEXT:  .LBB6_4: # %else10
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB6_5: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 24(sp)
+; RV64-NEXT:    flw fa5, 24(sp)
+; RV64-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB6_2
+; RV64-NEXT:  .LBB6_6: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 16(sp)
+; RV64-NEXT:    flw fa5, 16(sp)
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB6_3
+; RV64-NEXT:  .LBB6_7: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 8(sp)
+; RV64-NEXT:    flw fa5, 8(sp)
+; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB6_4
+; RV64-NEXT:  .LBB6_8: # %cond.load9
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    sw a0, 0(sp)
+; RV64-NEXT:    flw fa5, 0(sp)
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+  %res = call <4 x float> @llvm.masked.expandload.v4f32(ptr %base, <4 x i1> %mask, <4 x float> %src0)
+  ret <4 x float>%res
+}
+
+declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>)
+define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB7_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB7_10
+; RV32-NEXT:  .LBB7_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB7_11
+; RV32-NEXT:  .LBB7_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB7_12
+; RV32-NEXT:  .LBB7_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB7_13
+; RV32-NEXT:  .LBB7_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB7_14
+; RV32-NEXT:  .LBB7_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB7_15
+; RV32-NEXT:  .LBB7_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB7_16
+; RV32-NEXT:  .LBB7_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB7_9: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB7_2
+; RV32-NEXT:  .LBB7_10: # %cond.load1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB7_3
+; RV32-NEXT:  .LBB7_11: # %cond.load5
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB7_4
+; RV32-NEXT:  .LBB7_12: # %cond.load9
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB7_5
+; RV32-NEXT:  .LBB7_13: # %cond.load13
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 4
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB7_6
+; RV32-NEXT:  .LBB7_14: # %cond.load17
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB7_7
+; RV32-NEXT:  .LBB7_15: # %cond.load21
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 6
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB7_8
+; RV32-NEXT:  .LBB7_16: # %cond.load25
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    .cfi_def_cfa_offset 64
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB7_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB7_10
+; RV64-NEXT:  .LBB7_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB7_11
+; RV64-NEXT:  .LBB7_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB7_12
+; RV64-NEXT:  .LBB7_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB7_13
+; RV64-NEXT:  .LBB7_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB7_14
+; RV64-NEXT:  .LBB7_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB7_15
+; RV64-NEXT:  .LBB7_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB7_16
+; RV64-NEXT:  .LBB7_8: # %else26
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB7_9: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 56(sp)
+; RV64-NEXT:    flw fa5, 56(sp)
+; RV64-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB7_2
+; RV64-NEXT:  .LBB7_10: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 48(sp)
+; RV64-NEXT:    flw fa5, 48(sp)
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB7_3
+; RV64-NEXT:  .LBB7_11: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 40(sp)
+; RV64-NEXT:    flw fa5, 40(sp)
+; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB7_4
+; RV64-NEXT:  .LBB7_12: # %cond.load9
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 32(sp)
+; RV64-NEXT:    flw fa5, 32(sp)
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB7_5
+; RV64-NEXT:  .LBB7_13: # %cond.load13
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 24(sp)
+; RV64-NEXT:    flw fa5, 24(sp)
+; RV64-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 4
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB7_6
+; RV64-NEXT:  .LBB7_14: # %cond.load17
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 16(sp)
+; RV64-NEXT:    flw fa5, 16(sp)
+; RV64-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB7_7
+; RV64-NEXT:  .LBB7_15: # %cond.load21
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    sw a2, 8(sp)
+; RV64-NEXT:    flw fa5, 8(sp)
+; RV64-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 6
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB7_8
+; RV64-NEXT:  .LBB7_16: # %cond.load25
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    sw a0, 0(sp)
+; RV64-NEXT:    flw fa5, 0(sp)
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 7
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr %base, <8 x i1> %mask, <8 x float> %src0)
+  ret <8 x float>%res
+}
+
+declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>)
+define <1 x double> @expandload_v1f64(ptr align 8 %base, <1 x double> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB8_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:  .LBB8_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB8_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:  .LBB8_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x double> @llvm.masked.expandload.v1f64(ptr %base, <1 x i1> %mask, <1 x double> %src0)
+  ret <1 x double>%res
+}
+
+declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>)
+define <2 x double> @expandload_v2f64(ptr align 8 %base, <2 x double> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB9_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB9_4
+; RV32-NEXT:  .LBB9_2: # %else2
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB9_3: # %cond.load
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB9_2
+; RV32-NEXT:  .LBB9_4: # %cond.load1
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    fld fa5, 0(sp)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.s.f v9, fa5
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB9_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB9_4
+; RV64-NEXT:  .LBB9_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB9_3: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB9_2
+; RV64-NEXT:  .LBB9_4: # %cond.load1
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.s.f v9, fa5
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x double> @llvm.masked.expandload.v2f64(ptr %base, <2 x i1> %mask, <2 x double> %src0)
+  ret <2 x double>%res
+}
+
+declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>)
+define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB10_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB10_6
+; RV32-NEXT:  .LBB10_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB10_7
+; RV32-NEXT:  .LBB10_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB10_8
+; RV32-NEXT:  .LBB10_4: # %else10
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB10_5: # %cond.load
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 28(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 24(sp)
+; RV32-NEXT:    fld fa5, 24(sp)
+; RV32-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB10_2
+; RV32-NEXT:  .LBB10_6: # %cond.load1
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 20(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 16(sp)
+; RV32-NEXT:    fld fa5, 16(sp)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB10_3
+; RV32-NEXT:  .LBB10_7: # %cond.load5
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB10_4
+; RV32-NEXT:  .LBB10_8: # %cond.load9
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    fld fa5, 0(sp)
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.s.f v10, fa5
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB10_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB10_6
+; RV64-NEXT:  .LBB10_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB10_7
+; RV64-NEXT:  .LBB10_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB10_8
+; RV64-NEXT:  .LBB10_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB10_5: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB10_2
+; RV64-NEXT:  .LBB10_6: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB10_3
+; RV64-NEXT:  .LBB10_7: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB10_4
+; RV64-NEXT:  .LBB10_8: # %cond.load9
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vfmv.s.f v10, fa5
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    ret
+  %res = call <4 x double> @llvm.masked.expandload.v4f64(ptr %base, <4 x i1> %mask, <4 x double> %src0)
+  ret <4 x double>%res
+}
+
+declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>)
+define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    .cfi_def_cfa_offset 64
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB11_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB11_10
+; RV32-NEXT:  .LBB11_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB11_11
+; RV32-NEXT:  .LBB11_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB11_12
+; RV32-NEXT:  .LBB11_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB11_13
+; RV32-NEXT:  .LBB11_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB11_14
+; RV32-NEXT:  .LBB11_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB11_15
+; RV32-NEXT:  .LBB11_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB11_16
+; RV32-NEXT:  .LBB11_8: # %else26
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB11_9: # %cond.load
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 60(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 56(sp)
+; RV32-NEXT:    fld fa5, 56(sp)
+; RV32-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
+; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB11_2
+; RV32-NEXT:  .LBB11_10: # %cond.load1
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 52(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 48(sp)
+; RV32-NEXT:    fld fa5, 48(sp)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB11_3
+; RV32-NEXT:  .LBB11_11: # %cond.load5
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 44(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 40(sp)
+; RV32-NEXT:    fld fa5, 40(sp)
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB11_4
+; RV32-NEXT:  .LBB11_12: # %cond.load9
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 36(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 32(sp)
+; RV32-NEXT:    fld fa5, 32(sp)
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:  .LBB11_13: # %cond.load13
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 28(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 24(sp)
+; RV32-NEXT:    fld fa5, 24(sp)
+; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 4
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB11_6
+; RV32-NEXT:  .LBB11_14: # %cond.load17
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 20(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 16(sp)
+; RV32-NEXT:    fld fa5, 16(sp)
+; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB11_7
+; RV32-NEXT:  .LBB11_15: # %cond.load21
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 12(sp)
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 6
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB11_8
+; RV32-NEXT:  .LBB11_16: # %cond.load25
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    sw a0, 0(sp)
+; RV32-NEXT:    fld fa5, 0(sp)
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.s.f v12, fa5
+; RV32-NEXT:    vslideup.vi v8, v12, 7
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB11_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB11_10
+; RV64-NEXT:  .LBB11_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB11_11
+; RV64-NEXT:  .LBB11_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB11_12
+; RV64-NEXT:  .LBB11_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB11_13
+; RV64-NEXT:  .LBB11_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB11_14
+; RV64-NEXT:  .LBB11_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB11_15
+; RV64-NEXT:  .LBB11_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB11_16
+; RV64-NEXT:  .LBB11_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB11_9: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
+; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB11_2
+; RV64-NEXT:  .LBB11_10: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB11_3
+; RV64-NEXT:  .LBB11_11: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB11_4
+; RV64-NEXT:  .LBB11_12: # %cond.load9
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 3
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB11_5
+; RV64-NEXT:  .LBB11_13: # %cond.load13
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 4
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB11_6
+; RV64-NEXT:  .LBB11_14: # %cond.load17
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB11_7
+; RV64-NEXT:  .LBB11_15: # %cond.load21
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 6
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB11_8
+; RV64-NEXT:  .LBB11_16: # %cond.load25
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vfmv.s.f v12, fa5
+; RV64-NEXT:    vslideup.vi v8, v12, 7
+; RV64-NEXT:    ret
+  %res = call <8 x double> @llvm.masked.expandload.v8f64(ptr %base, <8 x i1> %mask, <8 x double> %src0)
+  ret <8 x double>%res
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
new file mode 100644
index 00000000000000..42d099e0a3dc8c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -0,0 +1,2078 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
+define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) {
+; CHECK-LABEL: expandload_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %cond.load
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:  .LBB0_2: # %else
+; CHECK-NEXT:    ret
+  %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %src0)
+  ret <1 x i8>%res
+}
+
+declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
+define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) {
+; CHECK-LABEL: expandload_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB1_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB1_4
+; CHECK-NEXT:  .LBB1_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_3: # %cond.load
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB1_2
+; CHECK-NEXT:  .LBB1_4: # %cond.load1
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    ret
+  %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %src0)
+  ret <2 x i8>%res
+}
+
+declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
+define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) {
+; CHECK-LABEL: expandload_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB2_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB2_6
+; CHECK-NEXT:  .LBB2_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB2_7
+; CHECK-NEXT:  .LBB2_3: # %else6
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB2_8
+; CHECK-NEXT:  .LBB2_4: # %else10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_5: # %cond.load
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB2_2
+; CHECK-NEXT:  .LBB2_6: # %cond.load1
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB2_3
+; CHECK-NEXT:  .LBB2_7: # %cond.load5
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB2_4
+; CHECK-NEXT:  .LBB2_8: # %cond.load9
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %src0)
+  ret <4 x i8>%res
+}
+
+declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
+define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) {
+; CHECK-LABEL: expandload_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB3_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB3_10
+; CHECK-NEXT:  .LBB3_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB3_11
+; CHECK-NEXT:  .LBB3_3: # %else6
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB3_12
+; CHECK-NEXT:  .LBB3_4: # %else10
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB3_13
+; CHECK-NEXT:  .LBB3_5: # %else14
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB3_14
+; CHECK-NEXT:  .LBB3_6: # %else18
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB3_15
+; CHECK-NEXT:  .LBB3_7: # %else22
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB3_16
+; CHECK-NEXT:  .LBB3_8: # %else26
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_9: # %cond.load
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB3_2
+; CHECK-NEXT:  .LBB3_10: # %cond.load1
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB3_3
+; CHECK-NEXT:  .LBB3_11: # %cond.load5
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB3_4
+; CHECK-NEXT:  .LBB3_12: # %cond.load9
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB3_5
+; CHECK-NEXT:  .LBB3_13: # %cond.load13
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 5, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB3_6
+; CHECK-NEXT:  .LBB3_14: # %cond.load17
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB3_7
+; CHECK-NEXT:  .LBB3_15: # %cond.load21
+; CHECK-NEXT:    lbu a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 7, e8, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 6
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB3_8
+; CHECK-NEXT:  .LBB3_16: # %cond.load25
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 7
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %src0)
+  ret <8 x i8>%res
+}
+
+declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
+define <1 x i16> @expandload_v1i16(ptr align 2 %base, <1 x i16> %src0, <1 x i1> %mask) {
+; CHECK-LABEL: expandload_v1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %cond.load
+; CHECK-NEXT:    lbu a1, 1(a0)
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    slli a1, a1, 8
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v8, a0
+; CHECK-NEXT:  .LBB4_2: # %else
+; CHECK-NEXT:    ret
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr %base, <1 x i1> %mask, <1 x i16> %src0)
+  ret <1 x i16>%res
+}
+
+declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
+define <2 x i16> @expandload_v2i16(ptr align 2 %base, <2 x i16> %src0, <2 x i1> %mask) {
+; CHECK-LABEL: expandload_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB5_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB5_4
+; CHECK-NEXT:  .LBB5_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_3: # %cond.load
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB5_2
+; CHECK-NEXT:  .LBB5_4: # %cond.load1
+; CHECK-NEXT:    lbu a1, 1(a0)
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    slli a1, a1, 8
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr %base, <2 x i1> %mask, <2 x i16> %src0)
+  ret <2 x i16>%res
+}
+
+declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
+define <4 x i16> @expandload_v4i16(ptr align 2 %base, <4 x i16> %src0, <4 x i1> %mask) {
+; CHECK-LABEL: expandload_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB6_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB6_6
+; CHECK-NEXT:  .LBB6_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB6_7
+; CHECK-NEXT:  .LBB6_3: # %else6
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB6_8
+; CHECK-NEXT:  .LBB6_4: # %else10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_5: # %cond.load
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB6_2
+; CHECK-NEXT:  .LBB6_6: # %cond.load1
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB6_3
+; CHECK-NEXT:  .LBB6_7: # %cond.load5
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB6_4
+; CHECK-NEXT:  .LBB6_8: # %cond.load9
+; CHECK-NEXT:    lbu a1, 1(a0)
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    slli a1, a1, 8
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr %base, <4 x i1> %mask, <4 x i16> %src0)
+  ret <4 x i16>%res
+}
+
+declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
+define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1> %mask) {
+; CHECK-LABEL: expandload_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB7_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB7_10
+; CHECK-NEXT:  .LBB7_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB7_11
+; CHECK-NEXT:  .LBB7_3: # %else6
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB7_12
+; CHECK-NEXT:  .LBB7_4: # %else10
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB7_13
+; CHECK-NEXT:  .LBB7_5: # %else14
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB7_14
+; CHECK-NEXT:  .LBB7_6: # %else18
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB7_15
+; CHECK-NEXT:  .LBB7_7: # %else22
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB7_16
+; CHECK-NEXT:  .LBB7_8: # %else26
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_9: # %cond.load
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB7_2
+; CHECK-NEXT:  .LBB7_10: # %cond.load1
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB7_3
+; CHECK-NEXT:  .LBB7_11: # %cond.load5
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB7_4
+; CHECK-NEXT:  .LBB7_12: # %cond.load9
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB7_5
+; CHECK-NEXT:  .LBB7_13: # %cond.load13
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB7_6
+; CHECK-NEXT:  .LBB7_14: # %cond.load17
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB7_7
+; CHECK-NEXT:  .LBB7_15: # %cond.load21
+; CHECK-NEXT:    lbu a2, 1(a0)
+; CHECK-NEXT:    lbu a3, 0(a0)
+; CHECK-NEXT:    slli a2, a2, 8
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 6
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB7_8
+; CHECK-NEXT:  .LBB7_16: # %cond.load25
+; CHECK-NEXT:    lbu a1, 1(a0)
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    slli a1, a1, 8
+; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 7
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr %base, <8 x i1> %mask, <8 x i16> %src0)
+  ret <8 x i16>%res
+}
+
+declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
+define <1 x i32> @expandload_v1i32(ptr align 4 %base, <1 x i32> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB8_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:  .LBB8_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB8_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lb a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:  .LBB8_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr %base, <1 x i1> %mask, <1 x i32> %src0)
+  ret <1 x i32>%res
+}
+
+declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
+define <2 x i32> @expandload_v2i32(ptr align 4 %base, <2 x i32> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB9_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB9_4
+; RV32-NEXT:  .LBB9_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB9_3: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; RV32-NEXT:    vmv.s.x v8, a2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB9_2
+; RV32-NEXT:  .LBB9_4: # %cond.load1
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB9_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB9_4
+; RV64-NEXT:  .LBB9_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB9_3: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB9_2
+; RV64-NEXT:  .LBB9_4: # %cond.load1
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lb a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr %base, <2 x i1> %mask, <2 x i32> %src0)
+  ret <2 x i32>%res
+}
+
+declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
+define <4 x i32> @expandload_v4i32(ptr align 4 %base, <4 x i32> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB10_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB10_6
+; RV32-NEXT:  .LBB10_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB10_7
+; RV32-NEXT:  .LBB10_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB10_8
+; RV32-NEXT:  .LBB10_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB10_5: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; RV32-NEXT:    vmv.s.x v8, a2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB10_2
+; RV32-NEXT:  .LBB10_6: # %cond.load1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vmv.s.x v9, a2
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB10_3
+; RV32-NEXT:  .LBB10_7: # %cond.load5
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV32-NEXT:    vmv.s.x v9, a2
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB10_4
+; RV32-NEXT:  .LBB10_8: # %cond.load9
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB10_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB10_6
+; RV64-NEXT:  .LBB10_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB10_7
+; RV64-NEXT:  .LBB10_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB10_8
+; RV64-NEXT:  .LBB10_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB10_5: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB10_2
+; RV64-NEXT:  .LBB10_6: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v9, a2
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB10_3
+; RV64-NEXT:  .LBB10_7: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v9, a2
+; RV64-NEXT:    vslideup.vi v8, v9, 2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB10_4
+; RV64-NEXT:  .LBB10_8: # %cond.load9
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lb a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vslideup.vi v8, v9, 3
+; RV64-NEXT:    ret
+  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr %base, <4 x i1> %mask, <4 x i32> %src0)
+  ret <4 x i32>%res
+}
+
+declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
+define <8 x i32> @expandload_v8i32(ptr align 4 %base, <8 x i32> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB11_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB11_10
+; RV32-NEXT:  .LBB11_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB11_11
+; RV32-NEXT:  .LBB11_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB11_12
+; RV32-NEXT:  .LBB11_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB11_13
+; RV32-NEXT:  .LBB11_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB11_14
+; RV32-NEXT:  .LBB11_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB11_15
+; RV32-NEXT:  .LBB11_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB11_16
+; RV32-NEXT:  .LBB11_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB11_9: # %cond.load
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; RV32-NEXT:    vmv.s.x v8, a2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB11_2
+; RV32-NEXT:  .LBB11_10: # %cond.load1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vmv.s.x v10, a2
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB11_3
+; RV32-NEXT:  .LBB11_11: # %cond.load5
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV32-NEXT:    vmv.s.x v10, a2
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB11_4
+; RV32-NEXT:  .LBB11_12: # %cond.load9
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV32-NEXT:    vmv.s.x v10, a2
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:  .LBB11_13: # %cond.load13
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; RV32-NEXT:    vmv.s.x v10, a2
+; RV32-NEXT:    vslideup.vi v8, v10, 4
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB11_6
+; RV32-NEXT:  .LBB11_14: # %cond.load17
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; RV32-NEXT:    vmv.s.x v10, a2
+; RV32-NEXT:    vslideup.vi v8, v10, 5
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB11_7
+; RV32-NEXT:  .LBB11_15: # %cond.load21
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a5, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; RV32-NEXT:    vmv.s.x v10, a2
+; RV32-NEXT:    vslideup.vi v8, v10, 6
+; RV32-NEXT:    addi a0, a0, 4
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB11_8
+; RV32-NEXT:  .LBB11_16: # %cond.load25
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv.s.x v10, a0
+; RV32-NEXT:    vslideup.vi v8, v10, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB11_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB11_10
+; RV64-NEXT:  .LBB11_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB11_11
+; RV64-NEXT:  .LBB11_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB11_12
+; RV64-NEXT:  .LBB11_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB11_13
+; RV64-NEXT:  .LBB11_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB11_14
+; RV64-NEXT:  .LBB11_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB11_15
+; RV64-NEXT:  .LBB11_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB11_16
+; RV64-NEXT:  .LBB11_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB11_9: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB11_2
+; RV64-NEXT:  .LBB11_10: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB11_3
+; RV64-NEXT:  .LBB11_11: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB11_4
+; RV64-NEXT:  .LBB11_12: # %cond.load9
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB11_5
+; RV64-NEXT:  .LBB11_13: # %cond.load13
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 4
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB11_6
+; RV64-NEXT:  .LBB11_14: # %cond.load17
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 5
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB11_7
+; RV64-NEXT:  .LBB11_15: # %cond.load21
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lb a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 6
+; RV64-NEXT:    addi a0, a0, 4
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB11_8
+; RV64-NEXT:  .LBB11_16: # %cond.load25
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lb a0, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vslideup.vi v8, v10, 7
+; RV64-NEXT:    ret
+  %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr %base, <8 x i1> %mask, <8 x i32> %src0)
+  ret <8 x i32>%res
+}
+
+declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
+define <1 x i64> @expandload_v1i64(ptr align 8 %base, <1 x i64> %src0, <1 x i1> %mask) {
+; RV32-LABEL: expandload_v1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vfirst.m a1, v0
+; RV32-NEXT:    bnez a1, .LBB12_2
+; RV32-NEXT:  # %bb.1: # %cond.load
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a2, 0(a0)
+; RV32-NEXT:    lbu a3, 2(a0)
+; RV32-NEXT:    lbu a4, 3(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a0, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:  .LBB12_2: # %else
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vfirst.m a1, v0
+; RV64-NEXT:    bnez a1, .LBB12_2
+; RV64-NEXT:  # %bb.1: # %cond.load
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:  .LBB12_2: # %else
+; RV64-NEXT:    ret
+  %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr %base, <1 x i1> %mask, <1 x i64> %src0)
+  ret <1 x i64>%res
+}
+
+declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
+define <2 x i64> @expandload_v2i64(ptr align 8 %base, <2 x i64> %src0, <2 x i1> %mask) {
+; RV32-LABEL: expandload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB13_3
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    bnez a1, .LBB13_4
+; RV32-NEXT:  .LBB13_2: # %else2
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB13_3: # %cond.load
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 2
+; RV32-NEXT:    beqz a1, .LBB13_2
+; RV32-NEXT:  .LBB13_4: # %cond.load1
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v9, v8, a0
+; RV32-NEXT:    vslide1down.vx v9, v9, a1
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB13_3
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    bnez a1, .LBB13_4
+; RV64-NEXT:  .LBB13_2: # %else2
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB13_3: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 2
+; RV64-NEXT:    beqz a1, .LBB13_2
+; RV64-NEXT:  .LBB13_4: # %cond.load1
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vslideup.vi v8, v9, 1
+; RV64-NEXT:    ret
+  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr %base, <2 x i1> %mask, <2 x i64> %src0)
+  ret <2 x i64>%res
+}
+
+declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
+define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1> %mask) {
+; RV32-LABEL: expandload_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB14_5
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB14_6
+; RV32-NEXT:  .LBB14_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB14_7
+; RV32-NEXT:  .LBB14_3: # %else6
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    bnez a1, .LBB14_8
+; RV32-NEXT:  .LBB14_4: # %else10
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB14_5: # %cond.load
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB14_2
+; RV32-NEXT:  .LBB14_6: # %cond.load1
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v8, a3
+; RV32-NEXT:    vslide1down.vx v10, v10, a2
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v10, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB14_3
+; RV32-NEXT:  .LBB14_7: # %cond.load5
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v8, a3
+; RV32-NEXT:    vslide1down.vx v10, v10, a2
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v10, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, 8
+; RV32-NEXT:    beqz a1, .LBB14_4
+; RV32-NEXT:  .LBB14_8: # %cond.load9
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v10, v8, a0
+; RV32-NEXT:    vslide1down.vx v10, v10, a1
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v10, 3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB14_5
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB14_6
+; RV64-NEXT:  .LBB14_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB14_7
+; RV64-NEXT:  .LBB14_3: # %else6
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    bnez a1, .LBB14_8
+; RV64-NEXT:  .LBB14_4: # %else10
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB14_5: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB14_2
+; RV64-NEXT:  .LBB14_6: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB14_3
+; RV64-NEXT:  .LBB14_7: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v10, a2
+; RV64-NEXT:    vslideup.vi v8, v10, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, 8
+; RV64-NEXT:    beqz a1, .LBB14_4
+; RV64-NEXT:  .LBB14_8: # %cond.load9
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vslideup.vi v8, v10, 3
+; RV64-NEXT:    ret
+  %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr %base, <4 x i1> %mask, <4 x i64> %src0)
+  ret <4 x i64>%res
+}
+
+declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
+define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1> %mask) {
+; RV32-LABEL: expandload_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v0
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    bnez a2, .LBB15_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    bnez a2, .LBB15_10
+; RV32-NEXT:  .LBB15_2: # %else2
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    bnez a2, .LBB15_11
+; RV32-NEXT:  .LBB15_3: # %else6
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    bnez a2, .LBB15_12
+; RV32-NEXT:  .LBB15_4: # %else10
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    bnez a2, .LBB15_13
+; RV32-NEXT:  .LBB15_5: # %else14
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    bnez a2, .LBB15_14
+; RV32-NEXT:  .LBB15_6: # %else18
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    bnez a2, .LBB15_15
+; RV32-NEXT:  .LBB15_7: # %else22
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    bnez a1, .LBB15_16
+; RV32-NEXT:  .LBB15_8: # %else26
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB15_9: # %cond.load
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 2
+; RV32-NEXT:    beqz a2, .LBB15_2
+; RV32-NEXT:  .LBB15_10: # %cond.load1
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a3
+; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 1
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 4
+; RV32-NEXT:    beqz a2, .LBB15_3
+; RV32-NEXT:  .LBB15_11: # %cond.load5
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a3
+; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 2
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 8
+; RV32-NEXT:    beqz a2, .LBB15_4
+; RV32-NEXT:  .LBB15_12: # %cond.load9
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a3
+; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 3
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 16
+; RV32-NEXT:    beqz a2, .LBB15_5
+; RV32-NEXT:  .LBB15_13: # %cond.load13
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a3
+; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 4
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 32
+; RV32-NEXT:    beqz a2, .LBB15_6
+; RV32-NEXT:  .LBB15_14: # %cond.load17
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a3
+; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 5
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a2, a1, 64
+; RV32-NEXT:    beqz a2, .LBB15_7
+; RV32-NEXT:  .LBB15_15: # %cond.load21
+; RV32-NEXT:    lbu a2, 5(a0)
+; RV32-NEXT:    lbu a3, 4(a0)
+; RV32-NEXT:    lbu a4, 6(a0)
+; RV32-NEXT:    lbu a5, 7(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a5, a5, 24
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    lbu a3, 1(a0)
+; RV32-NEXT:    lbu a4, 0(a0)
+; RV32-NEXT:    lbu a5, 2(a0)
+; RV32-NEXT:    lbu a6, 3(a0)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    slli a5, a5, 16
+; RV32-NEXT:    slli a6, a6, 24
+; RV32-NEXT:    or a4, a6, a5
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a3
+; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 6
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    andi a1, a1, -128
+; RV32-NEXT:    beqz a1, .LBB15_8
+; RV32-NEXT:  .LBB15_16: # %cond.load25
+; RV32-NEXT:    lbu a1, 5(a0)
+; RV32-NEXT:    lbu a2, 4(a0)
+; RV32-NEXT:    lbu a3, 6(a0)
+; RV32-NEXT:    lbu a4, 7(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    slli a3, a3, 16
+; RV32-NEXT:    slli a4, a4, 24
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    lbu a2, 1(a0)
+; RV32-NEXT:    lbu a3, 0(a0)
+; RV32-NEXT:    lbu a4, 2(a0)
+; RV32-NEXT:    lbu a0, 3(a0)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    slli a4, a4, 16
+; RV32-NEXT:    slli a0, a0, 24
+; RV32-NEXT:    or a0, a0, a4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; RV32-NEXT:    vslide1down.vx v12, v8, a0
+; RV32-NEXT:    vslide1down.vx v12, v12, a1
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v12, 7
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: expandload_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v0
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB15_9
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB15_10
+; RV64-NEXT:  .LBB15_2: # %else2
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB15_11
+; RV64-NEXT:  .LBB15_3: # %else6
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB15_12
+; RV64-NEXT:  .LBB15_4: # %else10
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB15_13
+; RV64-NEXT:  .LBB15_5: # %else14
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB15_14
+; RV64-NEXT:  .LBB15_6: # %else18
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB15_15
+; RV64-NEXT:  .LBB15_7: # %else22
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    bnez a1, .LBB15_16
+; RV64-NEXT:  .LBB15_8: # %else26
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB15_9: # %cond.load
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
+; RV64-NEXT:    vmv.s.x v8, a2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB15_2
+; RV64-NEXT:  .LBB15_10: # %cond.load1
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 1
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB15_3
+; RV64-NEXT:  .LBB15_11: # %cond.load5
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 2
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB15_4
+; RV64-NEXT:  .LBB15_12: # %cond.load9
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 3
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB15_5
+; RV64-NEXT:  .LBB15_13: # %cond.load13
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 4
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB15_6
+; RV64-NEXT:  .LBB15_14: # %cond.load17
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 5
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB15_7
+; RV64-NEXT:  .LBB15_15: # %cond.load21
+; RV64-NEXT:    lbu a2, 1(a0)
+; RV64-NEXT:    lbu a3, 0(a0)
+; RV64-NEXT:    lbu a4, 2(a0)
+; RV64-NEXT:    lbu a5, 3(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a5, a5, 24
+; RV64-NEXT:    or a4, a5, a4
+; RV64-NEXT:    or a2, a4, a2
+; RV64-NEXT:    lbu a3, 5(a0)
+; RV64-NEXT:    lbu a4, 4(a0)
+; RV64-NEXT:    lbu a5, 6(a0)
+; RV64-NEXT:    lbu a6, 7(a0)
+; RV64-NEXT:    slli a3, a3, 8
+; RV64-NEXT:    or a3, a3, a4
+; RV64-NEXT:    slli a5, a5, 16
+; RV64-NEXT:    slli a6, a6, 24
+; RV64-NEXT:    or a4, a6, a5
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    slli a3, a3, 32
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
+; RV64-NEXT:    vmv.s.x v12, a2
+; RV64-NEXT:    vslideup.vi v8, v12, 6
+; RV64-NEXT:    addi a0, a0, 8
+; RV64-NEXT:    andi a1, a1, -128
+; RV64-NEXT:    beqz a1, .LBB15_8
+; RV64-NEXT:  .LBB15_16: # %cond.load25
+; RV64-NEXT:    lbu a1, 1(a0)
+; RV64-NEXT:    lbu a2, 0(a0)
+; RV64-NEXT:    lbu a3, 2(a0)
+; RV64-NEXT:    lbu a4, 3(a0)
+; RV64-NEXT:    slli a1, a1, 8
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a3, a3, 16
+; RV64-NEXT:    slli a4, a4, 24
+; RV64-NEXT:    or a3, a4, a3
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    lbu a2, 5(a0)
+; RV64-NEXT:    lbu a3, 4(a0)
+; RV64-NEXT:    lbu a4, 6(a0)
+; RV64-NEXT:    lbu a0, 7(a0)
+; RV64-NEXT:    slli a2, a2, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    slli a4, a4, 16
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    or a0, a0, a4
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vmv.s.x v12, a0
+; RV64-NEXT:    vslideup.vi v8, v12, 7
+; RV64-NEXT:    ret
+  %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr %base, <8 x i1> %mask, <8 x i64> %src0)
+  ret <8 x i64>%res
+}

>From 2278aeaa8914579e3e857dc599bc8db18afcbb71 Mon Sep 17 00:00:00 2001
From: Yeting Kuo <yeting.kuo at sifive.com>
Date: Fri, 1 Mar 2024 10:21:38 +0800
Subject: [PATCH 2/3] [ScalarizeMaskedMemIntrin] Use pointer alignment from
 pointer of masked.compressstore/expandload.

Previously we used Align(1) for all loads/stores scalarized from
masked.compressstore/expandload. For targets that do not support unaligned
accesses, this forces the backend to split otherwise aligned wide loads/stores
into byte-sized loads/stores. To fix this performance issue, this patch
preserves the alignment of the base pointer when scalarizing.
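
As a minimal sketch of the effect on the scalarized IR (the function name and
vector types below are illustrative, not taken from the tests in this patch),
consider an expandload whose pointer argument carries an align 4 attribute:

  declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)

  define <2 x i32> @sketch(ptr align 4 %p, <2 x i1> %m, <2 x i32> %passthru) {
    ; ScalarizeMaskedMemIntrin replaces this call with per-element conditional
    ; loads. Before this patch each emitted scalar load used "align 1"; with
    ; this patch it inherits the known alignment of %p, e.g.
    ;   %elt = load i32, ptr %p, align 4
    %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr %p, <2 x i1> %m, <2 x i32> %passthru)
    ret <2 x i32> %res
  }

On a target without fast unaligned scalar accesses this lets the backend emit
one 32-bit load per element instead of four byte loads plus shifts, which is
the difference visible in the updated RISC-V test checks below.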
---
 .../Scalar/ScalarizeMaskedMemIntrin.cpp       |   10 +-
 .../rvv/fixed-vectors-compressstore-fp.ll     | 1245 ++-----------
 .../rvv/fixed-vectors-compressstore-int.ll    |  733 +-------
 .../RISCV/rvv/fixed-vectors-expandload-fp.ll  | 1367 +-------------
 .../RISCV/rvv/fixed-vectors-expandload-int.ll | 1596 +++--------------
 5 files changed, 602 insertions(+), 4349 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index c01d03f6447240..2fd5530ad0d0cc 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -627,6 +627,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
   Value *Ptr = CI->getArgOperand(0);
   Value *Mask = CI->getArgOperand(1);
   Value *PassThru = CI->getArgOperand(2);
+  Align Alignment = Ptr->getPointerAlignment(DL);
 
   auto *VecType = cast<FixedVectorType>(CI->getType());
 
@@ -659,7 +660,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
       } else {
         Value *NewPtr =
             Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
+        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, Alignment,
                                               "Load" + Twine(Idx));
         ShuffleMask[Idx] = Idx;
         ++MemIndex;
@@ -713,7 +714,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
     CondBlock->setName("cond.load");
 
     Builder.SetInsertPoint(CondBlock->getTerminator());
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Alignment);
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Move the pointer if there are more blocks to come.
@@ -755,6 +756,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
   Value *Src = CI->getArgOperand(0);
   Value *Ptr = CI->getArgOperand(1);
   Value *Mask = CI->getArgOperand(2);
+  Align Alignment = Ptr->getPointerAlignment(DL);
 
   auto *VecType = cast<FixedVectorType>(Src->getType());
 
@@ -778,7 +780,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
       Value *OneElt =
           Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
       Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-      Builder.CreateAlignedStore(OneElt, NewPtr, Align(1));
+      Builder.CreateAlignedStore(OneElt, NewPtr, Alignment);
       ++MemIndex;
     }
     CI->eraseFromParent();
@@ -824,7 +826,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
 
     Builder.SetInsertPoint(CondBlock->getTerminator());
     Value *OneElt = Builder.CreateExtractElement(Src, Idx);
-    Builder.CreateAlignedStore(OneElt, Ptr, Align(1));
+    Builder.CreateAlignedStore(OneElt, Ptr, Alignment);
 
     // Move the pointer if there are more blocks to come.
     Value *NewPtr;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
index f32352b30c5ec6..8989a0c9f2ce1c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
@@ -10,16 +10,8 @@ define void @compressstore_v1f16(ptr align 2 %base, <1 x half> %v, <1 x i1> %mas
 ; RV32-NEXT:    vfirst.m a1, v0
 ; RV32-NEXT:    bnez a1, .LBB0_2
 ; RV32-NEXT:  # %bb.1: # %cond.store
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    addi a1, sp, 12
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT:    vse16.v v8, (a1)
-; RV32-NEXT:    lh a1, 12(sp)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:  .LBB0_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -29,16 +21,8 @@ define void @compressstore_v1f16(ptr align 2 %base, <1 x half> %v, <1 x i1> %mas
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB0_2
 ; RV64-NEXT:  # %bb.1: # %cond.store
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    addi a1, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT:    vse16.v v8, (a1)
-; RV64-NEXT:    lh a1, 8(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:  .LBB0_2: # %else
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr %base, <1 x i1> %mask)
@@ -49,8 +33,6 @@ declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
 define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mask) {
 ; RV32-LABEL: compressstore_v2f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -59,35 +41,21 @@ define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mas
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    bnez a1, .LBB1_4
 ; RV32-NEXT:  .LBB1_2: # %else2
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB1_3: # %cond.store
-; RV32-NEXT:    addi a2, sp, 12
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT:    vse16.v v8, (a2)
-; RV32-NEXT:    lh a2, 12(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB1_2
 ; RV32-NEXT:  .LBB1_4: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    addi a1, sp, 8
-; RV32-NEXT:    vse16.v v8, (a1)
-; RV32-NEXT:    lh a1, 8(sp)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v2f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -96,29 +64,17 @@ define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mas
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    bnez a1, .LBB1_4
 ; RV64-NEXT:  .LBB1_2: # %else2
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB1_3: # %cond.store
-; RV64-NEXT:    addi a2, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT:    vse16.v v8, (a2)
-; RV64-NEXT:    lh a2, 8(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB1_2
 ; RV64-NEXT:  .LBB1_4: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    mv a1, sp
-; RV64-NEXT:    vse16.v v8, (a1)
-; RV64-NEXT:    lh a1, 0(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr %base, <2 x i1> %mask)
   ret void
@@ -128,8 +84,6 @@ declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
 define void @compressstore_v4f16(ptr align 2 %base, <4 x half> %v, <4 x i1> %mask) {
 ; RV32-LABEL: compressstore_v4f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -144,59 +98,35 @@ define void @compressstore_v4f16(ptr align 2 %base, <4 x half> %v, <4 x i1> %mas
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    bnez a1, .LBB2_8
 ; RV32-NEXT:  .LBB2_4: # %else8
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB2_5: # %cond.store
-; RV32-NEXT:    addi a2, sp, 12
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vse16.v v8, (a2)
-; RV32-NEXT:    lh a2, 12(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB2_2
 ; RV32-NEXT:  .LBB2_6: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    addi a2, sp, 8
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 8(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB2_3
 ; RV32-NEXT:  .LBB2_7: # %cond.store4
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 2
-; RV32-NEXT:    addi a2, sp, 4
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 4(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB2_4
 ; RV32-NEXT:  .LBB2_8: # %cond.store7
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    mv a1, sp
-; RV32-NEXT:    vse16.v v8, (a1)
-; RV32-NEXT:    lh a1, 0(sp)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v4f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    .cfi_def_cfa_offset 32
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -211,53 +141,31 @@ define void @compressstore_v4f16(ptr align 2 %base, <4 x half> %v, <4 x i1> %mas
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    bnez a1, .LBB2_8
 ; RV64-NEXT:  .LBB2_4: # %else8
-; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB2_5: # %cond.store
-; RV64-NEXT:    addi a2, sp, 24
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT:    vse16.v v8, (a2)
-; RV64-NEXT:    lh a2, 24(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB2_2
 ; RV64-NEXT:  .LBB2_6: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    addi a2, sp, 16
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 16(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB2_3
 ; RV64-NEXT:  .LBB2_7: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 2
-; RV64-NEXT:    addi a2, sp, 8
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 8(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB2_4
 ; RV64-NEXT:  .LBB2_8: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-NEXT:    mv a1, sp
-; RV64-NEXT:    vse16.v v8, (a1)
-; RV64-NEXT:    lh a1, 0(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr %base, <4 x i1> %mask)
   ret void
@@ -267,8 +175,6 @@ declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
 define void @compressstore_v8f16(ptr align 2 %base, <8 x half> %v, <8 x i1> %mask) {
 ; RV32-LABEL: compressstore_v8f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -295,107 +201,63 @@ define void @compressstore_v8f16(ptr align 2 %base, <8 x half> %v, <8 x i1> %mas
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    bnez a1, .LBB3_16
 ; RV32-NEXT:  .LBB3_8: # %else20
-; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB3_9: # %cond.store
-; RV32-NEXT:    addi a2, sp, 28
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vse16.v v8, (a2)
-; RV32-NEXT:    lh a2, 28(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB3_2
 ; RV32-NEXT:  .LBB3_10: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    addi a2, sp, 24
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 24(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB3_3
 ; RV32-NEXT:  .LBB3_11: # %cond.store4
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 2
-; RV32-NEXT:    addi a2, sp, 20
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 20(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB3_4
 ; RV32-NEXT:  .LBB3_12: # %cond.store7
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 3
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 16(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB3_5
 ; RV32-NEXT:  .LBB3_13: # %cond.store10
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 4
-; RV32-NEXT:    addi a2, sp, 12
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 12(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB3_6
 ; RV32-NEXT:  .LBB3_14: # %cond.store13
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 5
-; RV32-NEXT:    addi a2, sp, 8
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 8(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB3_7
 ; RV32-NEXT:  .LBB3_15: # %cond.store16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 6
-; RV32-NEXT:    addi a2, sp, 4
-; RV32-NEXT:    vse16.v v9, (a2)
-; RV32-NEXT:    lh a2, 4(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse16.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB3_8
 ; RV32-NEXT:  .LBB3_16: # %cond.store19
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    mv a1, sp
-; RV32-NEXT:    vse16.v v8, (a1)
-; RV32-NEXT:    lh a1, 0(sp)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vse16.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v8f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -64
-; RV64-NEXT:    .cfi_def_cfa_offset 64
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -422,101 +284,59 @@ define void @compressstore_v8f16(ptr align 2 %base, <8 x half> %v, <8 x i1> %mas
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB3_16
 ; RV64-NEXT:  .LBB3_8: # %else20
-; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB3_9: # %cond.store
-; RV64-NEXT:    addi a2, sp, 56
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT:    vse16.v v8, (a2)
-; RV64-NEXT:    lh a2, 56(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB3_2
 ; RV64-NEXT:  .LBB3_10: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    addi a2, sp, 48
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 48(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB3_3
 ; RV64-NEXT:  .LBB3_11: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 2
-; RV64-NEXT:    addi a2, sp, 40
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 40(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    beqz a2, .LBB3_4
 ; RV64-NEXT:  .LBB3_12: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 3
-; RV64-NEXT:    addi a2, sp, 32
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 32(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 16
 ; RV64-NEXT:    beqz a2, .LBB3_5
 ; RV64-NEXT:  .LBB3_13: # %cond.store10
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 4
-; RV64-NEXT:    addi a2, sp, 24
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 24(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB3_6
 ; RV64-NEXT:  .LBB3_14: # %cond.store13
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 5
-; RV64-NEXT:    addi a2, sp, 16
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 16(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB3_7
 ; RV64-NEXT:  .LBB3_15: # %cond.store16
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 6
-; RV64-NEXT:    addi a2, sp, 8
-; RV64-NEXT:    vse16.v v9, (a2)
-; RV64-NEXT:    lh a2, 8(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse16.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    beqz a1, .LBB3_8
 ; RV64-NEXT:  .LBB3_16: # %cond.store19
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 7
-; RV64-NEXT:    mv a1, sp
-; RV64-NEXT:    vse16.v v8, (a1)
-; RV64-NEXT:    lh a1, 0(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr %base, <8 x i1> %mask)
   ret void
@@ -530,16 +350,8 @@ define void @compressstore_v1f32(ptr align 4 %base, <1 x float> %v, <1 x i1> %ma
 ; RV32-NEXT:    vfirst.m a1, v0
 ; RV32-NEXT:    bnez a1, .LBB4_2
 ; RV32-NEXT:  # %bb.1: # %cond.store
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a1, fa5
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:  .LBB4_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -549,20 +361,8 @@ define void @compressstore_v1f32(ptr align 4 %base, <1 x float> %v, <1 x i1> %ma
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB4_2
 ; RV64-NEXT:  # %bb.1: # %cond.store
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    addi a1, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT:    vse32.v v8, (a1)
-; RV64-NEXT:    lw a1, 8(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:  .LBB4_2: # %else
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr %base, <1 x i1> %mask)
@@ -583,37 +383,19 @@ define void @compressstore_v2f32(ptr align 4 %base, <2 x float> %v, <2 x i1> %ma
 ; RV32-NEXT:  .LBB5_2: # %else2
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB5_3: # %cond.store
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB5_2
 ; RV32-NEXT:  .LBB5_4: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a1, fa5
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v2f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -622,37 +404,17 @@ define void @compressstore_v2f32(ptr align 4 %base, <2 x float> %v, <2 x i1> %ma
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    bnez a1, .LBB5_4
 ; RV64-NEXT:  .LBB5_2: # %else2
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB5_3: # %cond.store
-; RV64-NEXT:    addi a2, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT:    vse32.v v8, (a2)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB5_2
 ; RV64-NEXT:  .LBB5_4: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    mv a1, sp
-; RV64-NEXT:    vse32.v v8, (a1)
-; RV64-NEXT:    lw a1, 0(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr %base, <2 x i1> %mask)
   ret void
@@ -678,67 +440,33 @@ define void @compressstore_v4f32(ptr align 4 %base, <4 x float> %v, <4 x i1> %ma
 ; RV32-NEXT:  .LBB6_4: # %else8
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB6_5: # %cond.store
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB6_2
 ; RV32-NEXT:  .LBB6_6: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse32.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB6_3
 ; RV32-NEXT:  .LBB6_7: # %cond.store4
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 2
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse32.v v9, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB6_4
 ; RV32-NEXT:  .LBB6_8: # %cond.store7
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a1, fa5
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v4f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    .cfi_def_cfa_offset 32
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -753,69 +481,31 @@ define void @compressstore_v4f32(ptr align 4 %base, <4 x float> %v, <4 x i1> %ma
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    bnez a1, .LBB6_8
 ; RV64-NEXT:  .LBB6_4: # %else8
-; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB6_5: # %cond.store
-; RV64-NEXT:    addi a2, sp, 24
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vse32.v v8, (a2)
-; RV64-NEXT:    lw a2, 24(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB6_2
 ; RV64-NEXT:  .LBB6_6: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    addi a2, sp, 16
-; RV64-NEXT:    vse32.v v9, (a2)
-; RV64-NEXT:    lw a2, 16(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB6_3
 ; RV64-NEXT:  .LBB6_7: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 2
-; RV64-NEXT:    addi a2, sp, 8
-; RV64-NEXT:    vse32.v v9, (a2)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v9, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB6_4
 ; RV64-NEXT:  .LBB6_8: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-NEXT:    mv a1, sp
-; RV64-NEXT:    vse32.v v8, (a1)
-; RV64-NEXT:    lw a1, 0(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr %base, <4 x i1> %mask)
   ret void
@@ -853,127 +543,65 @@ define void @compressstore_v8f32(ptr align 4 %base, <8 x float> %v, <8 x i1> %ma
 ; RV32-NEXT:  .LBB7_8: # %else20
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB7_9: # %cond.store
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB7_2
 ; RV32-NEXT:  .LBB7_10: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse32.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB7_3
 ; RV32-NEXT:  .LBB7_11: # %cond.store4
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse32.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB7_4
 ; RV32-NEXT:  .LBB7_12: # %cond.store7
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 3
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vse32.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB7_5
 ; RV32-NEXT:  .LBB7_13: # %cond.store10
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 4
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB7_6
 ; RV32-NEXT:  .LBB7_14: # %cond.store13
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 5
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB7_7
 ; RV32-NEXT:  .LBB7_15: # %cond.store16
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 6
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fmv.x.w a2, fa5
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB7_8
 ; RV32-NEXT:  .LBB7_16: # %cond.store19
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fmv.x.w a1, fa5
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v8f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -64
-; RV64-NEXT:    .cfi_def_cfa_offset 64
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -1000,137 +628,63 @@ define void @compressstore_v8f32(ptr align 4 %base, <8 x float> %v, <8 x i1> %ma
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB7_16
 ; RV64-NEXT:  .LBB7_8: # %else20
-; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB7_9: # %cond.store
-; RV64-NEXT:    addi a2, sp, 56
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vse32.v v8, (a2)
-; RV64-NEXT:    lw a2, 56(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB7_2
 ; RV64-NEXT:  .LBB7_10: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-NEXT:    addi a2, sp, 48
-; RV64-NEXT:    vse32.v v10, (a2)
-; RV64-NEXT:    lw a2, 48(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB7_3
 ; RV64-NEXT:  .LBB7_11: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    addi a2, sp, 40
-; RV64-NEXT:    vse32.v v10, (a2)
-; RV64-NEXT:    lw a2, 40(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    beqz a2, .LBB7_4
 ; RV64-NEXT:  .LBB7_12: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-NEXT:    addi a2, sp, 32
-; RV64-NEXT:    vse32.v v10, (a2)
-; RV64-NEXT:    lw a2, 32(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 16
 ; RV64-NEXT:    beqz a2, .LBB7_5
 ; RV64-NEXT:  .LBB7_13: # %cond.store10
 ; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 4
-; RV64-NEXT:    addi a2, sp, 24
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vse32.v v10, (a2)
-; RV64-NEXT:    lw a2, 24(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB7_6
 ; RV64-NEXT:  .LBB7_14: # %cond.store13
 ; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-NEXT:    addi a2, sp, 16
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vse32.v v10, (a2)
-; RV64-NEXT:    lw a2, 16(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB7_7
 ; RV64-NEXT:  .LBB7_15: # %cond.store16
 ; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 6
-; RV64-NEXT:    addi a2, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vse32.v v10, (a2)
-; RV64-NEXT:    lw a2, 8(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse32.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    beqz a1, .LBB7_8
 ; RV64-NEXT:  .LBB7_16: # %cond.store19
 ; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 7
-; RV64-NEXT:    mv a1, sp
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vse32.v v8, (a1)
-; RV64-NEXT:    lw a1, 0(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr %base, <8 x i1> %mask)
   ret void
@@ -1144,28 +698,8 @@ define void @compressstore_v1f64(ptr align 8 %base, <1 x double> %v, <1 x i1> %m
 ; RV32-NEXT:    vfirst.m a1, v0
 ; RV32-NEXT:    bnez a1, .LBB8_2
 ; RV32-NEXT:  # %bb.1: # %cond.store
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    addi a1, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v8, (a1)
-; RV32-NEXT:    lw a1, 12(sp)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    lw a2, 8(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a1, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a1, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
-; RV32-NEXT:    srli a1, a2, 24
-; RV32-NEXT:    sb a1, 3(a0)
-; RV32-NEXT:    srli a1, a2, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:  .LBB8_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -1175,24 +709,8 @@ define void @compressstore_v1f64(ptr align 8 %base, <1 x double> %v, <1 x i1> %m
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB8_2
 ; RV64-NEXT:  # %bb.1: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fmv.x.d a1, fa5
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:  .LBB8_2: # %else
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr %base, <1 x i1> %mask)
@@ -1203,8 +721,6 @@ declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
 define void @compressstore_v2f64(ptr align 8 %base, <2 x double> %v, <2 x i1> %mask) {
 ; RV32-LABEL: compressstore_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -1213,53 +729,17 @@ define void @compressstore_v2f64(ptr align 8 %base, <2 x double> %v, <2 x i1> %m
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    bnez a1, .LBB9_4
 ; RV32-NEXT:  .LBB9_2: # %else2
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB9_3: # %cond.store
-; RV32-NEXT:    addi a2, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v8, (a2)
-; RV32-NEXT:    lw a2, 12(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 8(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB9_2
 ; RV32-NEXT:  .LBB9_4: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    mv a1, sp
-; RV32-NEXT:    vse64.v v8, (a1)
-; RV32-NEXT:    lw a1, 4(sp)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    lw a2, 0(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a1, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a1, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
-; RV32-NEXT:    srli a1, a2, 24
-; RV32-NEXT:    sb a1, 3(a0)
-; RV32-NEXT:    srli a1, a2, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v2f64:
@@ -1274,47 +754,15 @@ define void @compressstore_v2f64(ptr align 8 %base, <2 x double> %v, <2 x i1> %m
 ; RV64-NEXT:  .LBB9_2: # %else2
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB9_3: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB9_2
 ; RV64-NEXT:  .LBB9_4: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fmv.x.d a1, fa5
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr %base, <2 x i1> %mask)
   ret void
@@ -1324,8 +772,6 @@ declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
 define void @compressstore_v4f64(ptr align 8 %base, <4 x double> %v, <4 x i1> %mask) {
 ; RV32-LABEL: compressstore_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -1340,103 +786,33 @@ define void @compressstore_v4f64(ptr align 8 %base, <4 x double> %v, <4 x i1> %m
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    bnez a1, .LBB10_8
 ; RV32-NEXT:  .LBB10_4: # %else8
-; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB10_5: # %cond.store
-; RV32-NEXT:    addi a2, sp, 24
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v8, (a2)
-; RV32-NEXT:    lw a2, 28(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 24(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB10_2
 ; RV32-NEXT:  .LBB10_6: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 1
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vse64.v v10, (a2)
-; RV32-NEXT:    lw a2, 20(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 16(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB10_3
 ; RV32-NEXT:  .LBB10_7: # %cond.store4
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    addi a2, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v10, (a2)
-; RV32-NEXT:    lw a2, 12(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 8(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v10, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB10_4
 ; RV32-NEXT:  .LBB10_8: # %cond.store7
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    mv a1, sp
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v8, (a1)
-; RV32-NEXT:    lw a1, 4(sp)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    lw a2, 0(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a1, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a1, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
-; RV32-NEXT:    srli a1, a2, 24
-; RV32-NEXT:    sb a1, 3(a0)
-; RV32-NEXT:    srli a1, a2, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v4f64:
@@ -1457,93 +833,31 @@ define void @compressstore_v4f64(ptr align 8 %base, <4 x double> %v, <4 x i1> %m
 ; RV64-NEXT:  .LBB10_4: # %else8
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB10_5: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB10_2
 ; RV64-NEXT:  .LBB10_6: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse64.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB10_3
 ; RV64-NEXT:  .LBB10_7: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB10_4
 ; RV64-NEXT:  .LBB10_8: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fmv.x.d a1, fa5
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr %base, <4 x i1> %mask)
   ret void
@@ -1553,241 +867,108 @@ declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
 define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %mask) {
 ; RV32-LABEL: compressstore_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -512
-; RV32-NEXT:    .cfi_def_cfa_offset 512
-; RV32-NEXT:    sw ra, 508(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 504(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
-; RV32-NEXT:    addi s0, sp, 512
-; RV32-NEXT:    .cfi_def_cfa s0, 0
-; RV32-NEXT:    andi sp, sp, -64
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB11_10
+; RV32-NEXT:    bnez a2, .LBB11_11
 ; RV32-NEXT:  # %bb.1: # %else
 ; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB11_11
+; RV32-NEXT:    bnez a2, .LBB11_12
 ; RV32-NEXT:  .LBB11_2: # %else2
 ; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB11_12
+; RV32-NEXT:    bnez a2, .LBB11_13
 ; RV32-NEXT:  .LBB11_3: # %else5
 ; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    bnez a2, .LBB11_13
-; RV32-NEXT:  .LBB11_4: # %else8
+; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:  .LBB11_4: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v8, 3
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vse64.v v12, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:  .LBB11_5: # %else8
+; RV32-NEXT:    addi sp, sp, -320
+; RV32-NEXT:    .cfi_def_cfa_offset 320
+; RV32-NEXT:    sw ra, 316(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 312(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 320
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    andi sp, sp, -64
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    bnez a2, .LBB11_14
-; RV32-NEXT:  .LBB11_5: # %else11
+; RV32-NEXT:  # %bb.6: # %else11
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    bnez a2, .LBB11_15
-; RV32-NEXT:  .LBB11_6: # %else14
+; RV32-NEXT:  .LBB11_7: # %else14
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    bnez a2, .LBB11_16
-; RV32-NEXT:  .LBB11_7: # %else17
+; RV32-NEXT:  .LBB11_8: # %else17
 ; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    beqz a1, .LBB11_9
-; RV32-NEXT:  .LBB11_8: # %cond.store19
+; RV32-NEXT:    beqz a1, .LBB11_10
+; RV32-NEXT:  .LBB11_9: # %cond.store19
 ; RV32-NEXT:    mv a1, sp
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vse64.v v8, (a1)
 ; RV32-NEXT:    fld fa5, 56(sp)
-; RV32-NEXT:    fsd fa5, 120(sp)
-; RV32-NEXT:    lw a1, 124(sp)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    lw a2, 120(sp)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    srli a3, a1, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a1, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
-; RV32-NEXT:    srli a1, a2, 24
-; RV32-NEXT:    sb a1, 3(a0)
-; RV32-NEXT:    srli a1, a2, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:  .LBB11_9: # %else20
-; RV32-NEXT:    addi sp, s0, -512
-; RV32-NEXT:    lw ra, 508(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 504(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 512
+; RV32-NEXT:    fsd fa5, 0(a0)
+; RV32-NEXT:  .LBB11_10: # %else20
+; RV32-NEXT:    addi sp, s0, -320
+; RV32-NEXT:    lw ra, 316(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 312(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 320
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB11_10: # %cond.store
-; RV32-NEXT:    addi a2, sp, 496
+; RV32-NEXT:  .LBB11_11: # %cond.store
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v8, (a2)
-; RV32-NEXT:    lw a2, 500(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 496(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB11_2
-; RV32-NEXT:  .LBB11_11: # %cond.store1
+; RV32-NEXT:  .LBB11_12: # %cond.store1
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v12, v8, 1
-; RV32-NEXT:    addi a2, sp, 488
-; RV32-NEXT:    vse64.v v12, (a2)
-; RV32-NEXT:    lw a2, 492(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 488(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v12, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB11_3
-; RV32-NEXT:  .LBB11_12: # %cond.store4
+; RV32-NEXT:  .LBB11_13: # %cond.store4
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV32-NEXT:    vslidedown.vi v12, v8, 2
-; RV32-NEXT:    addi a2, sp, 480
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v12, (a2)
-; RV32-NEXT:    lw a2, 484(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 480(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    vse64.v v12, (a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    beqz a2, .LBB11_4
-; RV32-NEXT:  .LBB11_13: # %cond.store7
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v12, v8, 3
-; RV32-NEXT:    addi a2, sp, 472
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vse64.v v12, (a2)
-; RV32-NEXT:    lw a2, 476(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 472(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    addi a0, a0, 8
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    beqz a2, .LBB11_5
+; RV32-NEXT:    bnez a2, .LBB11_4
+; RV32-NEXT:    j .LBB11_5
 ; RV32-NEXT:  .LBB11_14: # %cond.store10
-; RV32-NEXT:    addi a2, sp, 384
+; RV32-NEXT:    addi a2, sp, 192
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vse64.v v8, (a2)
-; RV32-NEXT:    fld fa5, 416(sp)
-; RV32-NEXT:    fsd fa5, 464(sp)
-; RV32-NEXT:    lw a2, 468(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 464(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    fld fa5, 224(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    beqz a2, .LBB11_6
+; RV32-NEXT:    beqz a2, .LBB11_7
 ; RV32-NEXT:  .LBB11_15: # %cond.store13
-; RV32-NEXT:    addi a2, sp, 256
+; RV32-NEXT:    addi a2, sp, 128
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vse64.v v8, (a2)
-; RV32-NEXT:    fld fa5, 296(sp)
-; RV32-NEXT:    fsd fa5, 376(sp)
-; RV32-NEXT:    lw a2, 380(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 376(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    fld fa5, 168(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    beqz a2, .LBB11_7
+; RV32-NEXT:    beqz a2, .LBB11_8
 ; RV32-NEXT:  .LBB11_16: # %cond.store16
-; RV32-NEXT:    addi a2, sp, 128
+; RV32-NEXT:    addi a2, sp, 64
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vse64.v v8, (a2)
-; RV32-NEXT:    fld fa5, 176(sp)
-; RV32-NEXT:    fsd fa5, 248(sp)
-; RV32-NEXT:    lw a2, 252(sp)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    lw a3, 248(sp)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    srli a4, a2, 24
-; RV32-NEXT:    sb a4, 7(a0)
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    sb a4, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    fld fa5, 112(sp)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    bnez a1, .LBB11_8
-; RV32-NEXT:    j .LBB11_9
+; RV32-NEXT:    bnez a1, .LBB11_9
+; RV32-NEXT:    j .LBB11_10
 ;
 ; RV64-LABEL: compressstore_v8f64:
 ; RV64:       # %bb.0:
@@ -1807,23 +988,8 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:  .LBB11_4: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:  .LBB11_5: # %else8
 ; RV64-NEXT:    addi sp, sp, -320
@@ -1850,22 +1016,8 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:    mv a1, sp
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a1)
-; RV64-NEXT:    ld a1, 56(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    fld fa5, 56(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:  .LBB11_10: # %else20
 ; RV64-NEXT:    addi sp, s0, -320
 ; RV64-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
@@ -1873,70 +1025,23 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:    addi sp, sp, 320
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB11_11: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB11_2
 ; RV64-NEXT:  .LBB11_12: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse64.v v12, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB11_3
 ; RV64-NEXT:  .LBB11_13: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    fmv.x.d a2, fa5
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    bnez a2, .LBB11_4
@@ -1945,22 +1050,8 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:    addi a2, sp, 192
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a2)
-; RV64-NEXT:    ld a2, 224(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    fld fa5, 224(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB11_7
@@ -1968,22 +1059,8 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:    addi a2, sp, 128
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a2)
-; RV64-NEXT:    ld a2, 168(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    fld fa5, 168(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB11_8
@@ -1991,22 +1068,8 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:    addi a2, sp, 64
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a2)
-; RV64-NEXT:    ld a2, 112(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    fld fa5, 112(sp)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB11_9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
index b0387023294cf4..177cbf67a95e0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
@@ -190,11 +190,8 @@ define void @compressstore_v1i16(ptr align 2 %base, <1 x i16> %v, <1 x i1> %mask
 ; CHECK-NEXT:    vfirst.m a1, v0
 ; CHECK-NEXT:    bnez a1, .LBB4_2
 ; CHECK-NEXT:  # %bb.1: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:  .LBB4_2: # %else
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr %base, <1 x i1> %mask)
@@ -215,21 +212,15 @@ define void @compressstore_v2i16(ptr align 2 %base, <2 x i16> %v, <2 x i1> %mask
 ; CHECK-NEXT:  .LBB5_2: # %else2
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB5_3: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.x.s a2, v8
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a1, a1, 2
 ; CHECK-NEXT:    beqz a1, .LBB5_2
 ; CHECK-NEXT:  .LBB5_4: # %cond.store1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr %base, <2 x i1> %mask)
   ret void
@@ -255,41 +246,29 @@ define void @compressstore_v4i16(ptr align 2 %base, <4 x i16> %v, <4 x i1> %mask
 ; CHECK-NEXT:  .LBB6_4: # %else8
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB6_5: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.x.s a2, v8
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 2
 ; CHECK-NEXT:    beqz a2, .LBB6_2
 ; CHECK-NEXT:  .LBB6_6: # %cond.store1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 4
 ; CHECK-NEXT:    beqz a2, .LBB6_3
 ; CHECK-NEXT:  .LBB6_7: # %cond.store4
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 2
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a1, a1, 8
 ; CHECK-NEXT:    beqz a1, .LBB6_4
 ; CHECK-NEXT:  .LBB6_8: # %cond.store7
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr %base, <4 x i1> %mask)
   ret void
@@ -327,81 +306,57 @@ define void @compressstore_v8i16(ptr align 2 %base, <8 x i16> %v, <8 x i1> %mask
 ; CHECK-NEXT:  .LBB7_8: # %else20
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB7_9: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.x.s a2, v8
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 2
 ; CHECK-NEXT:    beqz a2, .LBB7_2
 ; CHECK-NEXT:  .LBB7_10: # %cond.store1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 4
 ; CHECK-NEXT:    beqz a2, .LBB7_3
 ; CHECK-NEXT:  .LBB7_11: # %cond.store4
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 2
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 8
 ; CHECK-NEXT:    beqz a2, .LBB7_4
 ; CHECK-NEXT:  .LBB7_12: # %cond.store7
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 3
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 16
 ; CHECK-NEXT:    beqz a2, .LBB7_5
 ; CHECK-NEXT:  .LBB7_13: # %cond.store10
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 4
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 32
 ; CHECK-NEXT:    beqz a2, .LBB7_6
 ; CHECK-NEXT:  .LBB7_14: # %cond.store13
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 5
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 64
 ; CHECK-NEXT:    beqz a2, .LBB7_7
 ; CHECK-NEXT:  .LBB7_15: # %cond.store16
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 6
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse16.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a1, a1, -128
 ; CHECK-NEXT:    beqz a1, .LBB7_8
 ; CHECK-NEXT:  .LBB7_16: # %cond.store19
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr %base, <8 x i1> %mask)
   ret void
@@ -415,15 +370,8 @@ define void @compressstore_v1i32(ptr align 4 %base, <1 x i32> %v, <1 x i1> %mask
 ; CHECK-NEXT:    vfirst.m a1, v0
 ; CHECK-NEXT:    bnez a1, .LBB8_2
 ; CHECK-NEXT:  # %bb.1: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a2, a1, 24
-; CHECK-NEXT:    sb a2, 3(a0)
-; CHECK-NEXT:    srli a2, a1, 16
-; CHECK-NEXT:    sb a2, 2(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:  .LBB8_2: # %else
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr %base, <1 x i1> %mask)
@@ -444,29 +392,15 @@ define void @compressstore_v2i32(ptr align 4 %base, <2 x i32> %v, <2 x i1> %mask
 ; CHECK-NEXT:  .LBB9_2: # %else2
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB9_3: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.x.s a2, v8
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a1, a1, 2
 ; CHECK-NEXT:    beqz a1, .LBB9_2
 ; CHECK-NEXT:  .LBB9_4: # %cond.store1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a2, a1, 24
-; CHECK-NEXT:    sb a2, 3(a0)
-; CHECK-NEXT:    srli a2, a1, 16
-; CHECK-NEXT:    sb a2, 2(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr %base, <2 x i1> %mask)
   ret void
@@ -492,57 +426,29 @@ define void @compressstore_v4i32(ptr align 4 %base, <4 x i32> %v, <4 x i1> %mask
 ; CHECK-NEXT:  .LBB10_4: # %else8
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB10_5: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.x.s a2, v8
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 2
 ; CHECK-NEXT:    beqz a2, .LBB10_2
 ; CHECK-NEXT:  .LBB10_6: # %cond.store1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 1
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 4
 ; CHECK-NEXT:    beqz a2, .LBB10_3
 ; CHECK-NEXT:  .LBB10_7: # %cond.store4
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 2
-; CHECK-NEXT:    vmv.x.s a2, v9
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a1, a1, 8
 ; CHECK-NEXT:    beqz a1, .LBB10_4
 ; CHECK-NEXT:  .LBB10_8: # %cond.store7
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a2, a1, 24
-; CHECK-NEXT:    sb a2, 3(a0)
-; CHECK-NEXT:    srli a2, a1, 16
-; CHECK-NEXT:    sb a2, 2(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr %base, <4 x i1> %mask)
   ret void
@@ -580,113 +486,61 @@ define void @compressstore_v8i32(ptr align 4 %base, <8 x i32> %v, <8 x i1> %mask
 ; CHECK-NEXT:  .LBB11_8: # %else20
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB11_9: # %cond.store
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.x.s a2, v8
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 2
 ; CHECK-NEXT:    beqz a2, .LBB11_2
 ; CHECK-NEXT:  .LBB11_10: # %cond.store1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 1
-; CHECK-NEXT:    vmv.x.s a2, v10
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse32.v v10, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 4
 ; CHECK-NEXT:    beqz a2, .LBB11_3
 ; CHECK-NEXT:  .LBB11_11: # %cond.store4
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 2
-; CHECK-NEXT:    vmv.x.s a2, v10
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse32.v v10, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 8
 ; CHECK-NEXT:    beqz a2, .LBB11_4
 ; CHECK-NEXT:  .LBB11_12: # %cond.store7
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 3
-; CHECK-NEXT:    vmv.x.s a2, v10
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vse32.v v10, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 16
 ; CHECK-NEXT:    beqz a2, .LBB11_5
 ; CHECK-NEXT:  .LBB11_13: # %cond.store10
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 4
-; CHECK-NEXT:    vmv.x.s a2, v10
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v10, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 32
 ; CHECK-NEXT:    beqz a2, .LBB11_6
 ; CHECK-NEXT:  .LBB11_14: # %cond.store13
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 5
-; CHECK-NEXT:    vmv.x.s a2, v10
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v10, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a2, a1, 64
 ; CHECK-NEXT:    beqz a2, .LBB11_7
 ; CHECK-NEXT:  .LBB11_15: # %cond.store16
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 6
-; CHECK-NEXT:    vmv.x.s a2, v10
-; CHECK-NEXT:    sb a2, 0(a0)
-; CHECK-NEXT:    srli a3, a2, 24
-; CHECK-NEXT:    sb a3, 3(a0)
-; CHECK-NEXT:    srli a3, a2, 16
-; CHECK-NEXT:    sb a3, 2(a0)
-; CHECK-NEXT:    srli a2, a2, 8
-; CHECK-NEXT:    sb a2, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v10, (a0)
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    andi a1, a1, -128
 ; CHECK-NEXT:    beqz a1, .LBB11_8
 ; CHECK-NEXT:  .LBB11_16: # %cond.store19
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    srli a2, a1, 24
-; CHECK-NEXT:    sb a2, 3(a0)
-; CHECK-NEXT:    srli a2, a1, 16
-; CHECK-NEXT:    sb a2, 2(a0)
-; CHECK-NEXT:    srli a1, a1, 8
-; CHECK-NEXT:    sb a1, 1(a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr %base, <8 x i1> %mask)
   ret void
@@ -705,20 +559,8 @@ define void @compressstore_v1i64(ptr align 8 %base, <1 x i64> %v, <1 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v9, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a2, v8
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 7(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
 ; RV32-NEXT:  .LBB12_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -728,23 +570,8 @@ define void @compressstore_v1i64(ptr align 8 %base, <1 x i64> %v, <1 x i1> %mask
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB12_2
 ; RV64-NEXT:  # %bb.1: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v8
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:  .LBB12_2: # %else
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr %base, <1 x i1> %mask)
@@ -770,20 +597,8 @@ define void @compressstore_v2i64(ptr align 8 %base, <2 x i64> %v, <2 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v9, v8, a2
 ; RV32-NEXT:    vmv.x.s a2, v9
 ; RV32-NEXT:    vmv.x.s a3, v8
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB13_2
@@ -794,20 +609,8 @@ define void @compressstore_v2i64(ptr align 8 %base, <2 x i64> %v, <2 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v9, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a2, v8
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 7(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v2i64:
@@ -822,45 +625,15 @@ define void @compressstore_v2i64(ptr align 8 %base, <2 x i64> %v, <2 x i1> %mask
 ; RV64-NEXT:  .LBB13_2: # %else2
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB13_3: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.x.s a2, v8
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB13_2
 ; RV64-NEXT:  .LBB13_4: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    vmv.x.s a1, v8
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr %base, <2 x i1> %mask)
   ret void
@@ -891,20 +664,8 @@ define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v10, v8, a2
 ; RV32-NEXT:    vmv.x.s a2, v10
 ; RV32-NEXT:    vmv.x.s a3, v8
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB14_2
@@ -915,20 +676,8 @@ define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v12, v10, a2
 ; RV32-NEXT:    vmv.x.s a2, v12
 ; RV32-NEXT:    vmv.x.s a3, v10
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB14_3
@@ -939,20 +688,8 @@ define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v12, v10, a2
 ; RV32-NEXT:    vmv.x.s a2, v12
 ; RV32-NEXT:    vmv.x.s a3, v10
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB14_4
@@ -963,20 +700,8 @@ define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v10, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v10
 ; RV32-NEXT:    vmv.x.s a2, v8
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 7(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v4i64:
@@ -997,89 +722,31 @@ define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask
 ; RV64-NEXT:  .LBB14_4: # %else8
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB14_5: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.x.s a2, v8
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB14_2
 ; RV64-NEXT:  .LBB14_6: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-NEXT:    vmv.x.s a2, v10
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse64.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB14_3
 ; RV64-NEXT:  .LBB14_7: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vmv.x.s a2, v10
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v10, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB14_4
 ; RV64-NEXT:  .LBB14_8: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-NEXT:    vmv.x.s a1, v8
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
   call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr %base, <4 x i1> %mask)
   ret void
@@ -1122,20 +789,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v12, v8, a2
 ; RV32-NEXT:    vmv.x.s a2, v12
 ; RV32-NEXT:    vmv.x.s a3, v8
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB15_2
@@ -1146,20 +801,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v16, v12, a2
 ; RV32-NEXT:    vmv.x.s a2, v16
 ; RV32-NEXT:    vmv.x.s a3, v12
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB15_3
@@ -1170,20 +813,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v16, v12, a2
 ; RV32-NEXT:    vmv.x.s a2, v16
 ; RV32-NEXT:    vmv.x.s a3, v12
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB15_4
@@ -1194,20 +825,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v16, v12, a2
 ; RV32-NEXT:    vmv.x.s a2, v16
 ; RV32-NEXT:    vmv.x.s a3, v12
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB15_5
@@ -1218,20 +837,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v16, v12, a2
 ; RV32-NEXT:    vmv.x.s a2, v16
 ; RV32-NEXT:    vmv.x.s a3, v12
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB15_6
@@ -1242,20 +849,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v16, v12, a2
 ; RV32-NEXT:    vmv.x.s a2, v16
 ; RV32-NEXT:    vmv.x.s a3, v12
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB15_7
@@ -1266,20 +861,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v16, v12, a2
 ; RV32-NEXT:    vmv.x.s a2, v16
 ; RV32-NEXT:    vmv.x.s a3, v12
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    sb a4, 3(a0)
-; RV32-NEXT:    srli a4, a3, 16
-; RV32-NEXT:    sb a4, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    sw a2, 4(a0)
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB15_8
@@ -1290,20 +873,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV32-NEXT:    vsrl.vx v12, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v12
 ; RV32-NEXT:    vmv.x.s a2, v8
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 3(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 7(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 5(a0)
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a1, 4(a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: compressstore_v8i64:
@@ -1324,22 +895,8 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:  .LBB15_4: # %cond.store7
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-NEXT:    vmv.x.s a2, v12
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:  .LBB15_5: # %else8
 ; RV64-NEXT:    addi sp, sp, -320
@@ -1367,21 +924,7 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a1)
 ; RV64-NEXT:    ld a1, 56(sp)
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    sd a1, 0(a0)
 ; RV64-NEXT:  .LBB15_10: # %else20
 ; RV64-NEXT:    addi sp, s0, -320
 ; RV64-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
@@ -1389,67 +932,23 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:    addi sp, sp, 320
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB15_11: # %cond.store
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.x.s a2, v8
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB15_2
 ; RV64-NEXT:  .LBB15_12: # %cond.store1
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 1
-; RV64-NEXT:    vmv.x.s a2, v12
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vse64.v v12, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB15_3
 ; RV64-NEXT:  .LBB15_13: # %cond.store4
 ; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-NEXT:    vmv.x.s a2, v12
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vse64.v v12, (a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    bnez a2, .LBB15_4
@@ -1459,21 +958,7 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ld a2, 224(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    sd a2, 0(a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB15_7
@@ -1482,21 +967,7 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ld a2, 168(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    sd a2, 0(a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB15_8
@@ -1505,21 +976,7 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ld a2, 112(sp)
-; RV64-NEXT:    sb a2, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 7(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 6(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 5(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 4(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 3(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 2(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    sd a2, 0(a0)
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB15_9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
index ddf027f97ec34c..6385f1dee05ac0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
@@ -10,17 +10,8 @@ define <1 x half> @expandload_v1f16(ptr align 2 %base, <1 x half> %src0, <1 x i1
 ; RV32-NEXT:    vfirst.m a1, v0
 ; RV32-NEXT:    bnez a1, .LBB0_2
 ; RV32-NEXT:  # %bb.1: # %cond.load
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a0, 0(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    sh a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 12
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:  .LBB0_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -30,17 +21,8 @@ define <1 x half> @expandload_v1f16(ptr align 2 %base, <1 x half> %src0, <1 x i1
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB0_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a0, 0(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    sh a0, 8(sp)
-; RV64-NEXT:    addi a0, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:  .LBB0_2: # %else
 ; RV64-NEXT:    ret
   %res = call <1 x half> @llvm.masked.expandload.v1f16(ptr %base, <1 x i1> %mask, <1 x half> %src0)
@@ -51,8 +33,6 @@ declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>)
 define <2 x half> @expandload_v2f16(ptr align 2 %base, <2 x half> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -61,37 +41,23 @@ define <2 x half> @expandload_v2f16(ptr align 2 %base, <2 x half> %src0, <2 x i1
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    bnez a1, .LBB1_4
 ; RV32-NEXT:  .LBB1_2: # %else2
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB1_3: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 12(sp)
-; RV32-NEXT:    flh fa5, 12(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB1_2
 ; RV32-NEXT:  .LBB1_4: # %cond.load1
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a0, 0(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    sh a0, 8(sp)
-; RV32-NEXT:    flh fa5, 8(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v2f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -100,31 +66,19 @@ define <2 x half> @expandload_v2f16(ptr align 2 %base, <2 x half> %src0, <2 x i1
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    bnez a1, .LBB1_4
 ; RV64-NEXT:  .LBB1_2: # %else2
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB1_3: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 8(sp)
-; RV64-NEXT:    flh fa5, 8(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB1_2
 ; RV64-NEXT:  .LBB1_4: # %cond.load1
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a0, 0(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    sh a0, 0(sp)
-; RV64-NEXT:    flh fa5, 0(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
   %res = call <2 x half> @llvm.masked.expandload.v2f16(ptr %base, <2 x i1> %mask, <2 x half> %src0)
   ret <2 x half>%res
@@ -134,8 +88,6 @@ declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>)
 define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -150,27 +102,16 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    bnez a1, .LBB2_8
 ; RV32-NEXT:  .LBB2_4: # %else10
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB2_5: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 12(sp)
-; RV32-NEXT:    flh fa5, 12(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB2_2
 ; RV32-NEXT:  .LBB2_6: # %cond.load1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 8(sp)
-; RV32-NEXT:    flh fa5, 8(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
@@ -178,12 +119,7 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB2_3
 ; RV32-NEXT:  .LBB2_7: # %cond.load5
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 4(sp)
-; RV32-NEXT:    flh fa5, 4(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 2
@@ -191,22 +127,14 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB2_4
 ; RV32-NEXT:  .LBB2_8: # %cond.load9
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a0, 0(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    sh a0, 0(sp)
-; RV32-NEXT:    flh fa5, 0(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 3
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v4f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    .cfi_def_cfa_offset 32
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -221,27 +149,16 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    bnez a1, .LBB2_8
 ; RV64-NEXT:  .LBB2_4: # %else10
-; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB2_5: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 24(sp)
-; RV64-NEXT:    flh fa5, 24(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB2_2
 ; RV64-NEXT:  .LBB2_6: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 16(sp)
-; RV64-NEXT:    flh fa5, 16(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
@@ -249,12 +166,7 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB2_3
 ; RV64-NEXT:  .LBB2_7: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 8(sp)
-; RV64-NEXT:    flh fa5, 8(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 2
@@ -262,16 +174,10 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB2_4
 ; RV64-NEXT:  .LBB2_8: # %cond.load9
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a0, 0(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    sh a0, 0(sp)
-; RV64-NEXT:    flh fa5, 0(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 3
-; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
   %res = call <4 x half> @llvm.masked.expandload.v4f16(ptr %base, <4 x i1> %mask, <4 x half> %src0)
   ret <4 x half>%res
@@ -281,8 +187,6 @@ declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>)
 define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -309,27 +213,16 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    bnez a1, .LBB3_16
 ; RV32-NEXT:  .LBB3_8: # %else26
-; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB3_9: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 28(sp)
-; RV32-NEXT:    flh fa5, 28(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 2
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB3_2
 ; RV32-NEXT:  .LBB3_10: # %cond.load1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 24(sp)
-; RV32-NEXT:    flh fa5, 24(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
@@ -337,12 +230,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB3_3
 ; RV32-NEXT:  .LBB3_11: # %cond.load5
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 20(sp)
-; RV32-NEXT:    flh fa5, 20(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 2
@@ -350,12 +238,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB3_4
 ; RV32-NEXT:  .LBB3_12: # %cond.load9
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 16(sp)
-; RV32-NEXT:    flh fa5, 16(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 3
@@ -363,12 +246,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB3_5
 ; RV32-NEXT:  .LBB3_13: # %cond.load13
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 12(sp)
-; RV32-NEXT:    flh fa5, 12(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 4
@@ -376,12 +254,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB3_6
 ; RV32-NEXT:  .LBB3_14: # %cond.load17
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 8(sp)
-; RV32-NEXT:    flh fa5, 8(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 5
@@ -389,12 +262,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB3_7
 ; RV32-NEXT:  .LBB3_15: # %cond.load21
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    sh a2, 4(sp)
-; RV32-NEXT:    flh fa5, 4(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 6
@@ -402,22 +270,14 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB3_8
 ; RV32-NEXT:  .LBB3_16: # %cond.load25
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a0, 0(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    sh a0, 0(sp)
-; RV32-NEXT:    flh fa5, 0(sp)
+; RV32-NEXT:    flh fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 7
-; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v8f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -64
-; RV64-NEXT:    .cfi_def_cfa_offset 64
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -444,27 +304,16 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB3_16
 ; RV64-NEXT:  .LBB3_8: # %else26
-; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB3_9: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 56(sp)
-; RV64-NEXT:    flh fa5, 56(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 2
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB3_2
 ; RV64-NEXT:  .LBB3_10: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 48(sp)
-; RV64-NEXT:    flh fa5, 48(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
@@ -472,12 +321,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB3_3
 ; RV64-NEXT:  .LBB3_11: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 40(sp)
-; RV64-NEXT:    flh fa5, 40(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 2
@@ -485,12 +329,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    beqz a2, .LBB3_4
 ; RV64-NEXT:  .LBB3_12: # %cond.load9
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 32(sp)
-; RV64-NEXT:    flh fa5, 32(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 3
@@ -498,12 +337,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a2, a1, 16
 ; RV64-NEXT:    beqz a2, .LBB3_5
 ; RV64-NEXT:  .LBB3_13: # %cond.load13
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 24(sp)
-; RV64-NEXT:    flh fa5, 24(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 4
@@ -511,12 +345,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB3_6
 ; RV64-NEXT:  .LBB3_14: # %cond.load17
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 16(sp)
-; RV64-NEXT:    flh fa5, 16(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 5
@@ -524,12 +353,7 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB3_7
 ; RV64-NEXT:  .LBB3_15: # %cond.load21
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    sh a2, 8(sp)
-; RV64-NEXT:    flh fa5, 8(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 6
@@ -537,16 +361,10 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    beqz a1, .LBB3_8
 ; RV64-NEXT:  .LBB3_16: # %cond.load25
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a0, 0(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    sh a0, 0(sp)
-; RV64-NEXT:    flh fa5, 0(sp)
+; RV64-NEXT:    flh fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 7
-; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
   %res = call <8 x half> @llvm.masked.expandload.v8f16(ptr %base, <8 x i1> %mask, <8 x half> %src0)
   ret <8 x half>%res
@@ -560,19 +378,8 @@ define <1 x float> @expandload_v1f32(ptr align 4 %base, <1 x float> %src0, <1 x
 ; RV32-NEXT:    vfirst.m a1, v0
 ; RV32-NEXT:    bnez a1, .LBB4_2
 ; RV32-NEXT:  # %bb.1: # %cond.load
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    fmv.w.x fa5, a0
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vfmv.s.f v8, fa5
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:  .LBB4_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -582,23 +389,8 @@ define <1 x float> @expandload_v1f32(ptr align 4 %base, <1 x float> %src0, <1 x
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB4_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    sw a0, 8(sp)
-; RV64-NEXT:    addi a0, sp, 8
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:  .LBB4_2: # %else
 ; RV64-NEXT:    ret
   %res = call <1 x float> @llvm.masked.expandload.v1f32(ptr %base, <1 x i1> %mask, <1 x float> %src0)
@@ -619,34 +411,14 @@ define <2 x float> @expandload_v2f32(ptr align 4 %base, <2 x float> %src0, <2 x
 ; RV32-NEXT:  .LBB5_2: # %else2
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB5_3: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB5_2
 ; RV32-NEXT:  .LBB5_4: # %cond.load1
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
@@ -654,8 +426,6 @@ define <2 x float> @expandload_v2f32(ptr align 4 %base, <2 x float> %src0, <2 x
 ;
 ; RV64-LABEL: expandload_v2f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -664,43 +434,19 @@ define <2 x float> @expandload_v2f32(ptr align 4 %base, <2 x float> %src0, <2 x
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    bnez a1, .LBB5_4
 ; RV64-NEXT:  .LBB5_2: # %else2
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB5_3: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 8(sp)
-; RV64-NEXT:    flw fa5, 8(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB5_2
 ; RV64-NEXT:  .LBB5_4: # %cond.load1
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    sw a0, 0(sp)
-; RV64-NEXT:    flw fa5, 0(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
   %res = call <2 x float> @llvm.masked.expandload.v2f32(ptr %base, <2 x i1> %mask, <2 x float> %src0)
   ret <2 x float>%res
@@ -726,34 +472,14 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV32-NEXT:  .LBB6_4: # %else10
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB6_5: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB6_2
 ; RV32-NEXT:  .LBB6_6: # %cond.load1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
@@ -761,17 +487,7 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB6_3
 ; RV32-NEXT:  .LBB6_7: # %cond.load5
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 2
@@ -779,17 +495,7 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB6_4
 ; RV32-NEXT:  .LBB6_8: # %cond.load9
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 3
@@ -797,8 +503,6 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ;
 ; RV64-LABEL: expandload_v4f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    .cfi_def_cfa_offset 32
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -813,39 +517,16 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    bnez a1, .LBB6_8
 ; RV64-NEXT:  .LBB6_4: # %else10
-; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB6_5: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 24(sp)
-; RV64-NEXT:    flw fa5, 24(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB6_2
 ; RV64-NEXT:  .LBB6_6: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 16(sp)
-; RV64-NEXT:    flw fa5, 16(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
@@ -853,18 +534,7 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB6_3
 ; RV64-NEXT:  .LBB6_7: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 8(sp)
-; RV64-NEXT:    flw fa5, 8(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 2
@@ -872,22 +542,10 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB6_4
 ; RV64-NEXT:  .LBB6_8: # %cond.load9
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    sw a0, 0(sp)
-; RV64-NEXT:    flw fa5, 0(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 3
-; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
   %res = call <4 x float> @llvm.masked.expandload.v4f32(ptr %base, <4 x i1> %mask, <4 x float> %src0)
   ret <4 x float>%res
@@ -925,34 +583,14 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:  .LBB7_8: # %else26
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB7_9: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 4
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB7_2
 ; RV32-NEXT:  .LBB7_10: # %cond.load1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 1
@@ -960,17 +598,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB7_3
 ; RV32-NEXT:  .LBB7_11: # %cond.load5
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 2
@@ -978,17 +606,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB7_4
 ; RV32-NEXT:  .LBB7_12: # %cond.load9
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 3
@@ -996,17 +614,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB7_5
 ; RV32-NEXT:  .LBB7_13: # %cond.load13
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 4
@@ -1014,17 +622,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB7_6
 ; RV32-NEXT:  .LBB7_14: # %cond.load17
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 5
@@ -1032,17 +630,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB7_7
 ; RV32-NEXT:  .LBB7_15: # %cond.load21
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    fmv.w.x fa5, a2
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 6
@@ -1050,17 +638,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB7_8
 ; RV32-NEXT:  .LBB7_16: # %cond.load25
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    flw fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 7
@@ -1068,8 +646,6 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ;
 ; RV64-LABEL: expandload_v8f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -64
-; RV64-NEXT:    .cfi_def_cfa_offset 64
 ; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v0
 ; RV64-NEXT:    andi a2, a1, 1
@@ -1096,39 +672,16 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB7_16
 ; RV64-NEXT:  .LBB7_8: # %else26
-; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB7_9: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 56(sp)
-; RV64-NEXT:    flw fa5, 56(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 4
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB7_2
 ; RV64-NEXT:  .LBB7_10: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 48(sp)
-; RV64-NEXT:    flw fa5, 48(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 1
@@ -1136,18 +689,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB7_3
 ; RV64-NEXT:  .LBB7_11: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 40(sp)
-; RV64-NEXT:    flw fa5, 40(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 2
@@ -1155,18 +697,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    beqz a2, .LBB7_4
 ; RV64-NEXT:  .LBB7_12: # %cond.load9
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 32(sp)
-; RV64-NEXT:    flw fa5, 32(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 3
@@ -1174,18 +705,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a2, a1, 16
 ; RV64-NEXT:    beqz a2, .LBB7_5
 ; RV64-NEXT:  .LBB7_13: # %cond.load13
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 24(sp)
-; RV64-NEXT:    flw fa5, 24(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 4
@@ -1193,18 +713,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB7_6
 ; RV64-NEXT:  .LBB7_14: # %cond.load17
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 16(sp)
-; RV64-NEXT:    flw fa5, 16(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 5
@@ -1212,18 +721,7 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB7_7
 ; RV64-NEXT:  .LBB7_15: # %cond.load21
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    sw a2, 8(sp)
-; RV64-NEXT:    flw fa5, 8(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 6
@@ -1231,22 +729,10 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    beqz a1, .LBB7_8
 ; RV64-NEXT:  .LBB7_16: # %cond.load25
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    sw a0, 0(sp)
-; RV64-NEXT:    flw fa5, 0(sp)
+; RV64-NEXT:    flw fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 7
-; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
   %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr %base, <8 x i1> %mask, <8 x float> %src0)
   ret <8 x float>%res
@@ -1260,34 +746,8 @@ define <1 x double> @expandload_v1f64(ptr align 8 %base, <1 x double> %src0, <1
 ; RV32-NEXT:    vfirst.m a1, v0
 ; RV32-NEXT:    bnez a1, .LBB8_2
 ; RV32-NEXT:  # %bb.1: # %cond.load
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:  .LBB8_2: # %else
 ; RV32-NEXT:    ret
 ;
@@ -1297,31 +757,8 @@ define <1 x double> @expandload_v1f64(ptr align 8 %base, <1 x double> %src0, <1
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB8_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64-NEXT:    vfmv.s.f v8, fa5
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:  .LBB8_2: # %else
 ; RV64-NEXT:    ret
   %res = call <1 x double> @llvm.masked.expandload.v1f64(ptr %base, <1 x i1> %mask, <1 x double> %src0)
@@ -1332,8 +769,6 @@ declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>)
 define <2 x double> @expandload_v2f64(ptr align 8 %base, <2 x double> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -1342,65 +777,19 @@ define <2 x double> @expandload_v2f64(ptr align 8 %base, <2 x double> %src0, <2
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    bnez a1, .LBB9_4
 ; RV32-NEXT:  .LBB9_2: # %else2
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB9_3: # %cond.load
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 12(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB9_2
 ; RV32-NEXT:  .LBB9_4: # %cond.load1
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    sw a1, 4(sp)
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    fld fa5, 0(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v2f64:
@@ -1415,58 +804,14 @@ define <2 x double> @expandload_v2f64(ptr align 8 %base, <2 x double> %src0, <2
 ; RV64-NEXT:  .LBB9_2: # %else2
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB9_3: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB9_2
 ; RV64-NEXT:  .LBB9_4: # %cond.load1
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
@@ -1479,8 +824,6 @@ declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>)
 define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 32
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -1495,61 +838,16 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    bnez a1, .LBB10_8
 ; RV32-NEXT:  .LBB10_4: # %else10
-; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB10_5: # %cond.load
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 28(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 24(sp)
-; RV32-NEXT:    fld fa5, 24(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB10_2
 ; RV32-NEXT:  .LBB10_6: # %cond.load1
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 20(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 16(sp)
-; RV32-NEXT:    fld fa5, 16(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 1
@@ -1557,29 +855,7 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB10_3
 ; RV32-NEXT:  .LBB10_7: # %cond.load5
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 12(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 2
@@ -1587,33 +863,10 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB10_4
 ; RV32-NEXT:  .LBB10_8: # %cond.load9
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    sw a1, 4(sp)
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    fld fa5, 0(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vslideup.vi v8, v10, 3
-; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v4f64:
@@ -1634,58 +887,14 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV64-NEXT:  .LBB10_4: # %else10
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB10_5: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB10_2
 ; RV64-NEXT:  .LBB10_6: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 1
@@ -1693,29 +902,7 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB10_3
 ; RV64-NEXT:  .LBB10_7: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 2
@@ -1723,29 +910,7 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB10_4
 ; RV64-NEXT:  .LBB10_8: # %cond.load9
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 3
@@ -1758,8 +923,6 @@ declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>)
 define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -64
-; RV32-NEXT:    .cfi_def_cfa_offset 64
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v0
 ; RV32-NEXT:    andi a2, a1, 1
@@ -1786,61 +949,16 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    bnez a1, .LBB11_16
 ; RV32-NEXT:  .LBB11_8: # %else26
-; RV32-NEXT:    addi sp, sp, 64
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB11_9: # %cond.load
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 60(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 56(sp)
-; RV32-NEXT:    fld fa5, 56(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
 ; RV32-NEXT:    vfmv.s.f v8, fa5
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB11_2
 ; RV32-NEXT:  .LBB11_10: # %cond.load1
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 52(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 48(sp)
-; RV32-NEXT:    fld fa5, 48(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 1
@@ -1848,29 +966,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB11_3
 ; RV32-NEXT:  .LBB11_11: # %cond.load5
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 44(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 40(sp)
-; RV32-NEXT:    fld fa5, 40(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 2
@@ -1878,29 +974,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB11_4
 ; RV32-NEXT:  .LBB11_12: # %cond.load9
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 36(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 32(sp)
-; RV32-NEXT:    fld fa5, 32(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 3
@@ -1908,29 +982,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB11_5
 ; RV32-NEXT:  .LBB11_13: # %cond.load13
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 28(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 24(sp)
-; RV32-NEXT:    fld fa5, 24(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 4
@@ -1938,29 +990,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB11_6
 ; RV32-NEXT:  .LBB11_14: # %cond.load17
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 20(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 16(sp)
-; RV32-NEXT:    fld fa5, 16(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 5
@@ -1968,29 +998,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB11_7
 ; RV32-NEXT:  .LBB11_15: # %cond.load21
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 12(sp)
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 6
@@ -1998,33 +1006,10 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB11_8
 ; RV32-NEXT:  .LBB11_16: # %cond.load25
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    sw a1, 4(sp)
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    fld fa5, 0(sp)
+; RV32-NEXT:    fld fa5, 0(a0)
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vfmv.s.f v12, fa5
 ; RV32-NEXT:    vslideup.vi v8, v12, 7
-; RV32-NEXT:    addi sp, sp, 64
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: expandload_v8f64:
@@ -2057,58 +1042,14 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:  .LBB11_8: # %else26
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB11_9: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
 ; RV64-NEXT:    vfmv.s.f v8, fa5
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB11_2
 ; RV64-NEXT:  .LBB11_10: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 1
@@ -2116,29 +1057,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB11_3
 ; RV64-NEXT:  .LBB11_11: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 2
@@ -2146,29 +1065,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    beqz a2, .LBB11_4
 ; RV64-NEXT:  .LBB11_12: # %cond.load9
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 3
@@ -2176,29 +1073,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    andi a2, a1, 16
 ; RV64-NEXT:    beqz a2, .LBB11_5
 ; RV64-NEXT:  .LBB11_13: # %cond.load13
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 4
@@ -2206,29 +1081,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB11_6
 ; RV64-NEXT:  .LBB11_14: # %cond.load17
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 5
@@ -2236,29 +1089,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB11_7
 ; RV64-NEXT:  .LBB11_15: # %cond.load21
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    fmv.d.x fa5, a2
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 6
@@ -2266,29 +1097,7 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    beqz a1, .LBB11_8
 ; RV64-NEXT:  .LBB11_16: # %cond.load25
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    fld fa5, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 7
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
index 42d099e0a3dc8c..ecea1e303ddf70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -204,12 +204,8 @@ define <1 x i16> @expandload_v1i16(ptr align 2 %base, <1 x i16> %src0, <1 x i1>
 ; CHECK-NEXT:    vfirst.m a1, v0
 ; CHECK-NEXT:    bnez a1, .LBB4_2
 ; CHECK-NEXT:  # %bb.1: # %cond.load
-; CHECK-NEXT:    lbu a1, 1(a0)
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    slli a1, a1, 8
-; CHECK-NEXT:    or a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.s.x v8, a0
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:  .LBB4_2: # %else
 ; CHECK-NEXT:    ret
   %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr %base, <1 x i1> %mask, <1 x i16> %src0)
@@ -230,20 +226,14 @@ define <2 x i16> @expandload_v2i16(ptr align 2 %base, <2 x i16> %src0, <2 x i1>
 ; CHECK-NEXT:  .LBB5_2: # %else2
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB5_3: # %cond.load
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, m2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a2
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a1, a1, 2
 ; CHECK-NEXT:    beqz a1, .LBB5_2
 ; CHECK-NEXT:  .LBB5_4: # %cond.load1
-; CHECK-NEXT:    lbu a1, 1(a0)
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    slli a1, a1, 8
-; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    lh a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -272,20 +262,14 @@ define <4 x i16> @expandload_v4i16(ptr align 2 %base, <4 x i16> %src0, <4 x i1>
 ; CHECK-NEXT:  .LBB6_4: # %else10
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB6_5: # %cond.load
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a2
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 2
 ; CHECK-NEXT:    beqz a2, .LBB6_2
 ; CHECK-NEXT:  .LBB6_6: # %cond.load1
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -293,10 +277,7 @@ define <4 x i16> @expandload_v4i16(ptr align 2 %base, <4 x i16> %src0, <4 x i1>
 ; CHECK-NEXT:    andi a2, a1, 4
 ; CHECK-NEXT:    beqz a2, .LBB6_3
 ; CHECK-NEXT:  .LBB6_7: # %cond.load5
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
@@ -304,10 +285,7 @@ define <4 x i16> @expandload_v4i16(ptr align 2 %base, <4 x i16> %src0, <4 x i1>
 ; CHECK-NEXT:    andi a1, a1, 8
 ; CHECK-NEXT:    beqz a1, .LBB6_4
 ; CHECK-NEXT:  .LBB6_8: # %cond.load9
-; CHECK-NEXT:    lbu a1, 1(a0)
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    slli a1, a1, 8
-; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    lh a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
@@ -348,20 +326,14 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:  .LBB7_8: # %else26
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB7_9: # %cond.load
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a2
 ; CHECK-NEXT:    addi a0, a0, 2
 ; CHECK-NEXT:    andi a2, a1, 2
 ; CHECK-NEXT:    beqz a2, .LBB7_2
 ; CHECK-NEXT:  .LBB7_10: # %cond.load1
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -369,10 +341,7 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    andi a2, a1, 4
 ; CHECK-NEXT:    beqz a2, .LBB7_3
 ; CHECK-NEXT:  .LBB7_11: # %cond.load5
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
@@ -380,10 +349,7 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    andi a2, a1, 8
 ; CHECK-NEXT:    beqz a2, .LBB7_4
 ; CHECK-NEXT:  .LBB7_12: # %cond.load9
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
@@ -391,10 +357,7 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    andi a2, a1, 16
 ; CHECK-NEXT:    beqz a2, .LBB7_5
 ; CHECK-NEXT:  .LBB7_13: # %cond.load13
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
@@ -402,10 +365,7 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    andi a2, a1, 32
 ; CHECK-NEXT:    beqz a2, .LBB7_6
 ; CHECK-NEXT:  .LBB7_14: # %cond.load17
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
@@ -413,10 +373,7 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    andi a2, a1, 64
 ; CHECK-NEXT:    beqz a2, .LBB7_7
 ; CHECK-NEXT:  .LBB7_15: # %cond.load21
-; CHECK-NEXT:    lbu a2, 1(a0)
-; CHECK-NEXT:    lbu a3, 0(a0)
-; CHECK-NEXT:    slli a2, a2, 8
-; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    lh a2, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v9, a2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 6
@@ -424,10 +381,7 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    andi a1, a1, -128
 ; CHECK-NEXT:    beqz a1, .LBB7_8
 ; CHECK-NEXT:  .LBB7_16: # %cond.load25
-; CHECK-NEXT:    lbu a1, 1(a0)
-; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    slli a1, a1, 8
-; CHECK-NEXT:    or a0, a1, a0
+; CHECK-NEXT:    lh a0, 0(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 7
@@ -438,640 +392,194 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 
 declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
 define <1 x i32> @expandload_v1i32(ptr align 4 %base, <1 x i32> %src0, <1 x i1> %mask) {
-; RV32-LABEL: expandload_v1i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT:    vfirst.m a1, v0
-; RV32-NEXT:    bnez a1, .LBB8_2
-; RV32-NEXT:  # %bb.1: # %cond.load
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vmv.s.x v8, a0
-; RV32-NEXT:  .LBB8_2: # %else
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: expandload_v1i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT:    vfirst.m a1, v0
-; RV64-NEXT:    bnez a1, .LBB8_2
-; RV64-NEXT:  # %bb.1: # %cond.load
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lb a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; RV64-NEXT:    vmv.s.x v8, a0
-; RV64-NEXT:  .LBB8_2: # %else
-; RV64-NEXT:    ret
+; CHECK-LABEL: expandload_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfirst.m a1, v0
+; CHECK-NEXT:    bnez a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1: # %cond.load
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:  .LBB8_2: # %else
+; CHECK-NEXT:    ret
   %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr %base, <1 x i1> %mask, <1 x i32> %src0)
   ret <1 x i32>%res
 }
 
 declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
 define <2 x i32> @expandload_v2i32(ptr align 4 %base, <2 x i32> %src0, <2 x i1> %mask) {
-; RV32-LABEL: expandload_v2i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB9_3
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    bnez a1, .LBB9_4
-; RV32-NEXT:  .LBB9_2: # %else2
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB9_3: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
-; RV32-NEXT:    vmv.s.x v8, a2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a1, a1, 2
-; RV32-NEXT:    beqz a1, .LBB9_2
-; RV32-NEXT:  .LBB9_4: # %cond.load1
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: expandload_v2i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB9_3
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    bnez a1, .LBB9_4
-; RV64-NEXT:  .LBB9_2: # %else2
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB9_3: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
-; RV64-NEXT:    vmv.s.x v8, a2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a1, a1, 2
-; RV64-NEXT:    beqz a1, .LBB9_2
-; RV64-NEXT:  .LBB9_4: # %cond.load1
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lb a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    ret
+; CHECK-LABEL: expandload_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB9_4
+; CHECK-NEXT:  .LBB9_2: # %else2
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_3: # %cond.load
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB9_2
+; CHECK-NEXT:  .LBB9_4: # %cond.load1
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    ret
   %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr %base, <2 x i1> %mask, <2 x i32> %src0)
   ret <2 x i32>%res
 }
 
 declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
 define <4 x i32> @expandload_v4i32(ptr align 4 %base, <4 x i32> %src0, <4 x i1> %mask) {
-; RV32-LABEL: expandload_v4i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB10_5
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB10_6
-; RV32-NEXT:  .LBB10_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB10_7
-; RV32-NEXT:  .LBB10_3: # %else6
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    bnez a1, .LBB10_8
-; RV32-NEXT:  .LBB10_4: # %else10
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB10_5: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
-; RV32-NEXT:    vmv.s.x v8, a2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB10_2
-; RV32-NEXT:  .LBB10_6: # %cond.load1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vmv.s.x v9, a2
-; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB10_3
-; RV32-NEXT:  .LBB10_7: # %cond.load5
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV32-NEXT:    vmv.s.x v9, a2
-; RV32-NEXT:    vslideup.vi v8, v9, 2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a1, a1, 8
-; RV32-NEXT:    beqz a1, .LBB10_4
-; RV32-NEXT:  .LBB10_8: # %cond.load9
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vslideup.vi v8, v9, 3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: expandload_v4i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB10_5
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB10_6
-; RV64-NEXT:  .LBB10_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB10_7
-; RV64-NEXT:  .LBB10_3: # %else6
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    bnez a1, .LBB10_8
-; RV64-NEXT:  .LBB10_4: # %else10
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB10_5: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
-; RV64-NEXT:    vmv.s.x v8, a2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB10_2
-; RV64-NEXT:  .LBB10_6: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64-NEXT:    vmv.s.x v9, a2
-; RV64-NEXT:    vslideup.vi v8, v9, 1
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB10_3
-; RV64-NEXT:  .LBB10_7: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64-NEXT:    vmv.s.x v9, a2
-; RV64-NEXT:    vslideup.vi v8, v9, 2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a1, a1, 8
-; RV64-NEXT:    beqz a1, .LBB10_4
-; RV64-NEXT:  .LBB10_8: # %cond.load9
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lb a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vslideup.vi v8, v9, 3
-; RV64-NEXT:    ret
+; CHECK-LABEL: expandload_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB10_5
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB10_6
+; CHECK-NEXT:  .LBB10_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB10_7
+; CHECK-NEXT:  .LBB10_3: # %else6
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    bnez a1, .LBB10_8
+; CHECK-NEXT:  .LBB10_4: # %else10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB10_5: # %cond.load
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB10_2
+; CHECK-NEXT:  .LBB10_6: # %cond.load1
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB10_3
+; CHECK-NEXT:  .LBB10_7: # %cond.load5
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, 8
+; CHECK-NEXT:    beqz a1, .LBB10_4
+; CHECK-NEXT:  .LBB10_8: # %cond.load9
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr %base, <4 x i1> %mask, <4 x i32> %src0)
   ret <4 x i32>%res
 }
 
 declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
 define <8 x i32> @expandload_v8i32(ptr align 4 %base, <8 x i32> %src0, <8 x i1> %mask) {
-; RV32-LABEL: expandload_v8i32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v0
-; RV32-NEXT:    andi a2, a1, 1
-; RV32-NEXT:    bnez a2, .LBB11_9
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    bnez a2, .LBB11_10
-; RV32-NEXT:  .LBB11_2: # %else2
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    bnez a2, .LBB11_11
-; RV32-NEXT:  .LBB11_3: # %else6
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    bnez a2, .LBB11_12
-; RV32-NEXT:  .LBB11_4: # %else10
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    bnez a2, .LBB11_13
-; RV32-NEXT:  .LBB11_5: # %else14
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    bnez a2, .LBB11_14
-; RV32-NEXT:  .LBB11_6: # %else18
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    bnez a2, .LBB11_15
-; RV32-NEXT:  .LBB11_7: # %else22
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    bnez a1, .LBB11_16
-; RV32-NEXT:  .LBB11_8: # %else26
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB11_9: # %cond.load
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; RV32-NEXT:    vmv.s.x v8, a2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 2
-; RV32-NEXT:    beqz a2, .LBB11_2
-; RV32-NEXT:  .LBB11_10: # %cond.load1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vmv.s.x v10, a2
-; RV32-NEXT:    vslideup.vi v8, v10, 1
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 4
-; RV32-NEXT:    beqz a2, .LBB11_3
-; RV32-NEXT:  .LBB11_11: # %cond.load5
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV32-NEXT:    vmv.s.x v10, a2
-; RV32-NEXT:    vslideup.vi v8, v10, 2
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 8
-; RV32-NEXT:    beqz a2, .LBB11_4
-; RV32-NEXT:  .LBB11_12: # %cond.load9
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; RV32-NEXT:    vmv.s.x v10, a2
-; RV32-NEXT:    vslideup.vi v8, v10, 3
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 16
-; RV32-NEXT:    beqz a2, .LBB11_5
-; RV32-NEXT:  .LBB11_13: # %cond.load13
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
-; RV32-NEXT:    vmv.s.x v10, a2
-; RV32-NEXT:    vslideup.vi v8, v10, 4
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 32
-; RV32-NEXT:    beqz a2, .LBB11_6
-; RV32-NEXT:  .LBB11_14: # %cond.load17
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
-; RV32-NEXT:    vmv.s.x v10, a2
-; RV32-NEXT:    vslideup.vi v8, v10, 5
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a2, a1, 64
-; RV32-NEXT:    beqz a2, .LBB11_7
-; RV32-NEXT:  .LBB11_15: # %cond.load21
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a5, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
-; RV32-NEXT:    vmv.s.x v10, a2
-; RV32-NEXT:    vslideup.vi v8, v10, 6
-; RV32-NEXT:    addi a0, a0, 4
-; RV32-NEXT:    andi a1, a1, -128
-; RV32-NEXT:    beqz a1, .LBB11_8
-; RV32-NEXT:  .LBB11_16: # %cond.load25
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.s.x v10, a0
-; RV32-NEXT:    vslideup.vi v8, v10, 7
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: expandload_v8i32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT:    vmv.x.s a1, v0
-; RV64-NEXT:    andi a2, a1, 1
-; RV64-NEXT:    bnez a2, .LBB11_9
-; RV64-NEXT:  # %bb.1: # %else
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    bnez a2, .LBB11_10
-; RV64-NEXT:  .LBB11_2: # %else2
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    bnez a2, .LBB11_11
-; RV64-NEXT:  .LBB11_3: # %else6
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    bnez a2, .LBB11_12
-; RV64-NEXT:  .LBB11_4: # %else10
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    bnez a2, .LBB11_13
-; RV64-NEXT:  .LBB11_5: # %else14
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    bnez a2, .LBB11_14
-; RV64-NEXT:  .LBB11_6: # %else18
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    bnez a2, .LBB11_15
-; RV64-NEXT:  .LBB11_7: # %else22
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    bnez a1, .LBB11_16
-; RV64-NEXT:  .LBB11_8: # %else26
-; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB11_9: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; RV64-NEXT:    vmv.s.x v8, a2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 2
-; RV64-NEXT:    beqz a2, .LBB11_2
-; RV64-NEXT:  .LBB11_10: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 1
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 4
-; RV64-NEXT:    beqz a2, .LBB11_3
-; RV64-NEXT:  .LBB11_11: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 2
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 8
-; RV64-NEXT:    beqz a2, .LBB11_4
-; RV64-NEXT:  .LBB11_12: # %cond.load9
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 3
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 16
-; RV64-NEXT:    beqz a2, .LBB11_5
-; RV64-NEXT:  .LBB11_13: # %cond.load13
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 4
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 32
-; RV64-NEXT:    beqz a2, .LBB11_6
-; RV64-NEXT:  .LBB11_14: # %cond.load17
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 5
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a2, a1, 64
-; RV64-NEXT:    beqz a2, .LBB11_7
-; RV64-NEXT:  .LBB11_15: # %cond.load21
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lb a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
-; RV64-NEXT:    vmv.s.x v10, a2
-; RV64-NEXT:    vslideup.vi v8, v10, 6
-; RV64-NEXT:    addi a0, a0, 4
-; RV64-NEXT:    andi a1, a1, -128
-; RV64-NEXT:    beqz a1, .LBB11_8
-; RV64-NEXT:  .LBB11_16: # %cond.load25
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lb a0, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vslideup.vi v8, v10, 7
-; RV64-NEXT:    ret
+; CHECK-LABEL: expandload_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v0
+; CHECK-NEXT:    andi a2, a1, 1
+; CHECK-NEXT:    bnez a2, .LBB11_9
+; CHECK-NEXT:  # %bb.1: # %else
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    bnez a2, .LBB11_10
+; CHECK-NEXT:  .LBB11_2: # %else2
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    bnez a2, .LBB11_11
+; CHECK-NEXT:  .LBB11_3: # %else6
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    bnez a2, .LBB11_12
+; CHECK-NEXT:  .LBB11_4: # %else10
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    bnez a2, .LBB11_13
+; CHECK-NEXT:  .LBB11_5: # %else14
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    bnez a2, .LBB11_14
+; CHECK-NEXT:  .LBB11_6: # %else18
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    bnez a2, .LBB11_15
+; CHECK-NEXT:  .LBB11_7: # %else22
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    bnez a1, .LBB11_16
+; CHECK-NEXT:  .LBB11_8: # %else26
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_9: # %cond.load
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; CHECK-NEXT:    vmv.s.x v8, a2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 2
+; CHECK-NEXT:    beqz a2, .LBB11_2
+; CHECK-NEXT:  .LBB11_10: # %cond.load1
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 4
+; CHECK-NEXT:    beqz a2, .LBB11_3
+; CHECK-NEXT:  .LBB11_11: # %cond.load5
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 2
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 8
+; CHECK-NEXT:    beqz a2, .LBB11_4
+; CHECK-NEXT:  .LBB11_12: # %cond.load9
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 3
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 16
+; CHECK-NEXT:    beqz a2, .LBB11_5
+; CHECK-NEXT:  .LBB11_13: # %cond.load13
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 32
+; CHECK-NEXT:    beqz a2, .LBB11_6
+; CHECK-NEXT:  .LBB11_14: # %cond.load17
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 5
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a2, a1, 64
+; CHECK-NEXT:    beqz a2, .LBB11_7
+; CHECK-NEXT:  .LBB11_15: # %cond.load21
+; CHECK-NEXT:    lw a2, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v10, a2
+; CHECK-NEXT:    vslideup.vi v8, v10, 6
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    andi a1, a1, -128
+; CHECK-NEXT:    beqz a1, .LBB11_8
+; CHECK-NEXT:  .LBB11_16: # %cond.load25
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a0
+; CHECK-NEXT:    vslideup.vi v8, v10, 7
+; CHECK-NEXT:    ret
   %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr %base, <8 x i1> %mask, <8 x i32> %src0)
   ret <8 x i32>%res
 }
@@ -1086,28 +594,10 @@ define <1 x i64> @expandload_v1i64(ptr align 8 %base, <1 x i64> %src0, <1 x i1>
 ; RV32-NEXT:  # %bb.1: # %cond.load
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a2, 0(a0)
-; RV32-NEXT:    lbu a3, 2(a0)
-; RV32-NEXT:    lbu a4, 3(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a0, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a4
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lw a1, 4(a0)
+; RV32-NEXT:    lw a0, 0(a0)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v8, (a0), zero
@@ -1121,30 +611,8 @@ define <1 x i64> @expandload_v1i64(ptr align 8 %base, <1 x i64> %src0, <1 x i1>
 ; RV64-NEXT:    vfirst.m a1, v0
 ; RV64-NEXT:    bnez a1, .LBB12_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:  .LBB12_2: # %else
 ; RV64-NEXT:    ret
   %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr %base, <1 x i1> %mask, <1 x i64> %src0)
@@ -1165,56 +633,20 @@ define <2 x i64> @expandload_v2i64(ptr align 8 %base, <2 x i64> %src0, <2 x i1>
 ; RV32-NEXT:  .LBB13_2: # %else2
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB13_3: # %cond.load
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 2
 ; RV32-NEXT:    beqz a1, .LBB13_2
 ; RV32-NEXT:  .LBB13_4: # %cond.load1
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a4
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    lw a0, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v9, v8, a0
-; RV32-NEXT:    vslide1down.vx v9, v9, a1
+; RV32-NEXT:    vslide1down.vx v9, v8, a1
+; RV32-NEXT:    vslide1down.vx v9, v9, a0
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
 ; RV32-NEXT:    ret
@@ -1231,56 +663,14 @@ define <2 x i64> @expandload_v2i64(ptr align 8 %base, <2 x i64> %src0, <2 x i1>
 ; RV64-NEXT:  .LBB13_2: # %else2
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB13_3: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m8, tu, ma
 ; RV64-NEXT:    vmv.s.x v8, a2
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a1, a1, 2
 ; RV64-NEXT:    beqz a1, .LBB13_2
 ; RV64-NEXT:  .LBB13_4: # %cond.load1
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    ld a0, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
@@ -1309,114 +699,42 @@ define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1>
 ; RV32-NEXT:  .LBB14_4: # %else10
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB14_5: # %cond.load
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB14_2
 ; RV32-NEXT:  .LBB14_6: # %cond.load1
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a3
-; RV32-NEXT:    vslide1down.vx v10, v10, a2
+; RV32-NEXT:    vslide1down.vx v10, v8, a2
+; RV32-NEXT:    vslide1down.vx v10, v10, a3
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v10, 1
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB14_3
 ; RV32-NEXT:  .LBB14_7: # %cond.load5
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a3
-; RV32-NEXT:    vslide1down.vx v10, v10, a2
+; RV32-NEXT:    vslide1down.vx v10, v8, a2
+; RV32-NEXT:    vslide1down.vx v10, v10, a3
 ; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v10, 2
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, 8
 ; RV32-NEXT:    beqz a1, .LBB14_4
 ; RV32-NEXT:  .LBB14_8: # %cond.load9
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a4
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    lw a0, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a1
+; RV32-NEXT:    vslide1down.vx v10, v8, a1
+; RV32-NEXT:    vslide1down.vx v10, v10, a0
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vslideup.vi v8, v10, 3
 ; RV32-NEXT:    ret
@@ -1439,56 +757,14 @@ define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1>
 ; RV64-NEXT:  .LBB14_4: # %else10
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB14_5: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
 ; RV64-NEXT:    vmv.s.x v8, a2
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB14_2
 ; RV64-NEXT:  .LBB14_6: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV64-NEXT:    vmv.s.x v10, a2
 ; RV64-NEXT:    vslideup.vi v8, v10, 1
@@ -1496,28 +772,7 @@ define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1>
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB14_3
 ; RV64-NEXT:  .LBB14_7: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV64-NEXT:    vmv.s.x v10, a2
 ; RV64-NEXT:    vslideup.vi v8, v10, 2
@@ -1525,28 +780,7 @@ define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1>
 ; RV64-NEXT:    andi a1, a1, 8
 ; RV64-NEXT:    beqz a1, .LBB14_4
 ; RV64-NEXT:  .LBB14_8: # %cond.load9
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    ld a0, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
 ; RV64-NEXT:    vslideup.vi v8, v10, 3
@@ -1587,230 +821,86 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV32-NEXT:  .LBB15_8: # %else26
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB15_9: # %cond.load
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 2
 ; RV32-NEXT:    beqz a2, .LBB15_2
 ; RV32-NEXT:  .LBB15_10: # %cond.load1
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a3
-; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 1
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 4
 ; RV32-NEXT:    beqz a2, .LBB15_3
 ; RV32-NEXT:  .LBB15_11: # %cond.load5
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a3
-; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
 ; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 2
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 8
 ; RV32-NEXT:    beqz a2, .LBB15_4
 ; RV32-NEXT:  .LBB15_12: # %cond.load9
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a3
-; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 3
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 16
 ; RV32-NEXT:    beqz a2, .LBB15_5
 ; RV32-NEXT:  .LBB15_13: # %cond.load13
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a3
-; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
 ; RV32-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 4
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 32
 ; RV32-NEXT:    beqz a2, .LBB15_6
 ; RV32-NEXT:  .LBB15_14: # %cond.load17
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a3
-; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
 ; RV32-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 5
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a2, a1, 64
 ; RV32-NEXT:    beqz a2, .LBB15_7
 ; RV32-NEXT:  .LBB15_15: # %cond.load21
-; RV32-NEXT:    lbu a2, 5(a0)
-; RV32-NEXT:    lbu a3, 4(a0)
-; RV32-NEXT:    lbu a4, 6(a0)
-; RV32-NEXT:    lbu a5, 7(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a5, a5, 24
-; RV32-NEXT:    or a4, a5, a4
-; RV32-NEXT:    or a2, a4, a2
-; RV32-NEXT:    lbu a3, 1(a0)
-; RV32-NEXT:    lbu a4, 0(a0)
-; RV32-NEXT:    lbu a5, 2(a0)
-; RV32-NEXT:    lbu a6, 3(a0)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    slli a5, a5, 16
-; RV32-NEXT:    slli a6, a6, 24
-; RV32-NEXT:    or a4, a6, a5
-; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    lw a3, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a3
-; RV32-NEXT:    vslide1down.vx v12, v12, a2
+; RV32-NEXT:    vslide1down.vx v12, v8, a2
+; RV32-NEXT:    vslide1down.vx v12, v12, a3
 ; RV32-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 6
 ; RV32-NEXT:    addi a0, a0, 8
 ; RV32-NEXT:    andi a1, a1, -128
 ; RV32-NEXT:    beqz a1, .LBB15_8
 ; RV32-NEXT:  .LBB15_16: # %cond.load25
-; RV32-NEXT:    lbu a1, 5(a0)
-; RV32-NEXT:    lbu a2, 4(a0)
-; RV32-NEXT:    lbu a3, 6(a0)
-; RV32-NEXT:    lbu a4, 7(a0)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a3, a3, 16
-; RV32-NEXT:    slli a4, a4, 24
-; RV32-NEXT:    or a3, a4, a3
-; RV32-NEXT:    or a1, a3, a1
-; RV32-NEXT:    lbu a2, 1(a0)
-; RV32-NEXT:    lbu a3, 0(a0)
-; RV32-NEXT:    lbu a4, 2(a0)
-; RV32-NEXT:    lbu a0, 3(a0)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    slli a4, a4, 16
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a4
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    lw a1, 0(a0)
+; RV32-NEXT:    lw a0, 4(a0)
 ; RV32-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT:    vslide1down.vx v12, v8, a0
-; RV32-NEXT:    vslide1down.vx v12, v12, a1
+; RV32-NEXT:    vslide1down.vx v12, v8, a1
+; RV32-NEXT:    vslide1down.vx v12, v12, a0
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vslideup.vi v8, v12, 7
 ; RV32-NEXT:    ret
@@ -1845,56 +935,14 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:  .LBB15_8: # %else26
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB15_9: # %cond.load
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m8, tu, ma
 ; RV64-NEXT:    vmv.s.x v8, a2
 ; RV64-NEXT:    addi a0, a0, 8
 ; RV64-NEXT:    andi a2, a1, 2
 ; RV64-NEXT:    beqz a2, .LBB15_2
 ; RV64-NEXT:  .LBB15_10: # %cond.load1
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    vslideup.vi v8, v12, 1
@@ -1902,28 +950,7 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    andi a2, a1, 4
 ; RV64-NEXT:    beqz a2, .LBB15_3
 ; RV64-NEXT:  .LBB15_11: # %cond.load5
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    vslideup.vi v8, v12, 2
@@ -1931,28 +958,7 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    andi a2, a1, 8
 ; RV64-NEXT:    beqz a2, .LBB15_4
 ; RV64-NEXT:  .LBB15_12: # %cond.load9
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    vslideup.vi v8, v12, 3
@@ -1960,28 +966,7 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    andi a2, a1, 16
 ; RV64-NEXT:    beqz a2, .LBB15_5
 ; RV64-NEXT:  .LBB15_13: # %cond.load13
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 5, e64, m4, tu, ma
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    vslideup.vi v8, v12, 4
@@ -1989,28 +974,7 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    andi a2, a1, 32
 ; RV64-NEXT:    beqz a2, .LBB15_6
 ; RV64-NEXT:  .LBB15_14: # %cond.load17
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    vslideup.vi v8, v12, 5
@@ -2018,28 +982,7 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    andi a2, a1, 64
 ; RV64-NEXT:    beqz a2, .LBB15_7
 ; RV64-NEXT:  .LBB15_15: # %cond.load21
-; RV64-NEXT:    lbu a2, 1(a0)
-; RV64-NEXT:    lbu a3, 0(a0)
-; RV64-NEXT:    lbu a4, 2(a0)
-; RV64-NEXT:    lbu a5, 3(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a5, a5, 24
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    lbu a3, 5(a0)
-; RV64-NEXT:    lbu a4, 4(a0)
-; RV64-NEXT:    lbu a5, 6(a0)
-; RV64-NEXT:    lbu a6, 7(a0)
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    slli a5, a5, 16
-; RV64-NEXT:    slli a6, a6, 24
-; RV64-NEXT:    or a4, a6, a5
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 32
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    ld a2, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 7, e64, m4, tu, ma
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    vslideup.vi v8, v12, 6
@@ -2047,28 +990,7 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    beqz a1, .LBB15_8
 ; RV64-NEXT:  .LBB15_16: # %cond.load25
-; RV64-NEXT:    lbu a1, 1(a0)
-; RV64-NEXT:    lbu a2, 0(a0)
-; RV64-NEXT:    lbu a3, 2(a0)
-; RV64-NEXT:    lbu a4, 3(a0)
-; RV64-NEXT:    slli a1, a1, 8
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a3, a3, 16
-; RV64-NEXT:    slli a4, a4, 24
-; RV64-NEXT:    or a3, a4, a3
-; RV64-NEXT:    or a1, a3, a1
-; RV64-NEXT:    lbu a2, 5(a0)
-; RV64-NEXT:    lbu a3, 4(a0)
-; RV64-NEXT:    lbu a4, 6(a0)
-; RV64-NEXT:    lbu a0, 7(a0)
-; RV64-NEXT:    slli a2, a2, 8
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli a4, a4, 16
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a4
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    ld a0, 0(a0)
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
 ; RV64-NEXT:    vslideup.vi v8, v12, 7

>From 11e23b6675ce7f1c59890df7912676e67b3809f0 Mon Sep 17 00:00:00 2001
From: Yeting Kuo <yeting.kuo at sifive.com>
Date: Fri, 1 Mar 2024 15:20:08 +0800
Subject: [PATCH 3/3] Use param align and adjust alignment for scalar
 instructions

---
 .../Scalar/ScalarizeMaskedMemIntrin.cpp       | 20 +++++--
 .../rvv/fixed-vectors-compressstore-fp.ll     | 48 ++++++++--------
 .../rvv/fixed-vectors-compressstore-int.ll    | 56 +++++++++----------
 .../RISCV/rvv/fixed-vectors-expandload-fp.ll  | 48 ++++++++--------
 .../RISCV/rvv/fixed-vectors-expandload-int.ll | 48 ++++++++--------
 5 files changed, 114 insertions(+), 106 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index 2fd5530ad0d0cc..f362dc5708b799 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -627,7 +627,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
   Value *Ptr = CI->getArgOperand(0);
   Value *Mask = CI->getArgOperand(1);
   Value *PassThru = CI->getArgOperand(2);
-  Align Alignment = Ptr->getPointerAlignment(DL);
+  Align Alignment = CI->getParamAlign(0).valueOrOne();
 
   auto *VecType = cast<FixedVectorType>(CI->getType());
 
@@ -645,6 +645,10 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
   // The result vector
   Value *VResult = PassThru;
 
+  // Adjust alignment for the scalar instruction.
+  const Align AdjustedAlignment =
+      commonAlignment(Alignment, EltTy->getPrimitiveSizeInBits() / 8);
+
   // Shorten the way if the mask is a vector of constants.
   // Create a build_vector pattern, with loads/poisons as necessary and then
   // shuffle blend with the pass through value.
@@ -660,7 +664,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
       } else {
         Value *NewPtr =
             Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, Alignment,
+        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, AdjustedAlignment,
                                               "Load" + Twine(Idx));
         ShuffleMask[Idx] = Idx;
         ++MemIndex;
@@ -714,7 +718,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
     CondBlock->setName("cond.load");
 
     Builder.SetInsertPoint(CondBlock->getTerminator());
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Alignment);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, AdjustedAlignment);
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Move the pointer if there are more blocks to come.
@@ -756,7 +760,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
   Value *Src = CI->getArgOperand(0);
   Value *Ptr = CI->getArgOperand(1);
   Value *Mask = CI->getArgOperand(2);
-  Align Alignment = Ptr->getPointerAlignment(DL);
+  Align Alignment = CI->getParamAlign(1).valueOrOne();
 
   auto *VecType = cast<FixedVectorType>(Src->getType());
 
@@ -769,6 +773,10 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
 
   Type *EltTy = VecType->getElementType();
 
+  // Adjust alignment for the scalar instruction.
+  const Align AdjustedAlignment =
+      commonAlignment(Alignment, EltTy->getPrimitiveSizeInBits() / 8);
+
   unsigned VectorWidth = VecType->getNumElements();
 
   // Shorten the way if the mask is a vector of constants.
@@ -780,7 +788,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
       Value *OneElt =
           Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
       Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-      Builder.CreateAlignedStore(OneElt, NewPtr, Alignment);
+      Builder.CreateAlignedStore(OneElt, NewPtr, AdjustedAlignment);
       ++MemIndex;
     }
     CI->eraseFromParent();
@@ -826,7 +834,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
 
     Builder.SetInsertPoint(CondBlock->getTerminator());
     Value *OneElt = Builder.CreateExtractElement(Src, Idx);
-    Builder.CreateAlignedStore(OneElt, Ptr, Alignment);
+    Builder.CreateAlignedStore(OneElt, Ptr, AdjustedAlignment);
 
     // Move the pointer if there are more blocks to come.
     Value *NewPtr;
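
For reference, the effect of the alignment handling above can be sanity-checked
with a small standalone program. This is only an illustrative sketch, not part
of the patch: the bare main() and the choice of an 8-byte i64 element are
assumptions for the example, while Align, MaybeAlign, commonAlignment() and
valueOrOne() are the helpers from llvm/Support/Alignment.h that the pass uses.

// Standalone illustration of the alignment math used by the pass change above.
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

int main() {
  const uint64_t EltSizeInBytes = 8; // e.g. the i64 element type in the tests

  // Call-site `align 8` on the pointer: scalar i64 accesses stay 8-aligned.
  assert(commonAlignment(Align(8), EltSizeInBytes).value() == 8);

  // Call-site `align 2`: the scalar accesses can only claim 2-byte alignment.
  assert(commonAlignment(Align(2), EltSizeInBytes).value() == 2);

  // Over-aligned pointer (`align 32`): clamped to the 8-byte element size,
  // since elements past the first are only element-size aligned.
  assert(commonAlignment(Align(32), EltSizeInBytes).value() == 8);

  // No align attribute on the call: getParamAlign() is empty and
  // valueOrOne() falls back to align 1.
  assert(MaybeAlign().valueOrOne().value() == 1);

  return 0;
}

The clamp is what lets one alignment be used for every scalarized access: only
the first element inherits the full pointer alignment, while elements past the
first sit at element-size offsets and are only guaranteed the common alignment
of the pointer and the element size.
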
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
index 8989a0c9f2ce1c..52c52921e7e1d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
 
 declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
-define void @compressstore_v1f16(ptr align 2 %base, <1 x half> %v, <1 x i1> %mask) {
+define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) {
 ; RV32-LABEL: compressstore_v1f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -25,12 +25,12 @@ define void @compressstore_v1f16(ptr align 2 %base, <1 x half> %v, <1 x i1> %mas
 ; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:  .LBB0_2: # %else
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr %base, <1 x i1> %mask)
+  call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr align 2 %base, <1 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
-define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mask) {
+define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) {
 ; RV32-LABEL: compressstore_v2f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -76,12 +76,12 @@ define void @compressstore_v2f16(ptr align 2 %base, <2 x half> %v, <2 x i1> %mas
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr %base, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr align 2 %base, <2 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
-define void @compressstore_v4f16(ptr align 2 %base, <4 x half> %v, <4 x i1> %mask) {
+define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) {
 ; RV32-LABEL: compressstore_v4f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -167,12 +167,12 @@ define void @compressstore_v4f16(ptr align 2 %base, <4 x half> %v, <4 x i1> %mas
 ; RV64-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr %base, <4 x i1> %mask)
+  call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr align 2 %base, <4 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
-define void @compressstore_v8f16(ptr align 2 %base, <8 x half> %v, <8 x i1> %mask) {
+define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) {
 ; RV32-LABEL: compressstore_v8f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -338,12 +338,12 @@ define void @compressstore_v8f16(ptr align 2 %base, <8 x half> %v, <8 x i1> %mas
 ; RV64-NEXT:    vslidedown.vi v8, v8, 7
 ; RV64-NEXT:    vse16.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr %base, <8 x i1> %mask)
+  call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr align 2 %base, <8 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
-define void @compressstore_v1f32(ptr align 4 %base, <1 x float> %v, <1 x i1> %mask) {
+define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) {
 ; RV32-LABEL: compressstore_v1f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -365,12 +365,12 @@ define void @compressstore_v1f32(ptr align 4 %base, <1 x float> %v, <1 x i1> %ma
 ; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:  .LBB4_2: # %else
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr %base, <1 x i1> %mask)
+  call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr align 4 %base, <1 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
-define void @compressstore_v2f32(ptr align 4 %base, <2 x float> %v, <2 x i1> %mask) {
+define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) {
 ; RV32-LABEL: compressstore_v2f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -416,12 +416,12 @@ define void @compressstore_v2f32(ptr align 4 %base, <2 x float> %v, <2 x i1> %ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr %base, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr align 4 %base, <2 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
-define void @compressstore_v4f32(ptr align 4 %base, <4 x float> %v, <4 x i1> %mask) {
+define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) {
 ; RV32-LABEL: compressstore_v4f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -507,12 +507,12 @@ define void @compressstore_v4f32(ptr align 4 %base, <4 x float> %v, <4 x i1> %ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr %base, <4 x i1> %mask)
+  call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr align 4 %base, <4 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
-define void @compressstore_v8f32(ptr align 4 %base, <8 x float> %v, <8 x i1> %mask) {
+define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) {
 ; RV32-LABEL: compressstore_v8f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -686,12 +686,12 @@ define void @compressstore_v8f32(ptr align 4 %base, <8 x float> %v, <8 x i1> %ma
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vse32.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr %base, <8 x i1> %mask)
+  call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %base, <8 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
-define void @compressstore_v1f64(ptr align 8 %base, <1 x double> %v, <1 x i1> %mask) {
+define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) {
 ; RV32-LABEL: compressstore_v1f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -713,12 +713,12 @@ define void @compressstore_v1f64(ptr align 8 %base, <1 x double> %v, <1 x i1> %m
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:  .LBB8_2: # %else
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr %base, <1 x i1> %mask)
+  call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr align 8 %base, <1 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
-define void @compressstore_v2f64(ptr align 8 %base, <2 x double> %v, <2 x i1> %mask) {
+define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) {
 ; RV32-LABEL: compressstore_v2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -764,12 +764,12 @@ define void @compressstore_v2f64(ptr align 8 %base, <2 x double> %v, <2 x i1> %m
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr %base, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr align 8 %base, <2 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
-define void @compressstore_v4f64(ptr align 8 %base, <4 x double> %v, <4 x i1> %mask) {
+define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) {
 ; RV32-LABEL: compressstore_v4f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -859,12 +859,12 @@ define void @compressstore_v4f64(ptr align 8 %base, <4 x double> %v, <4 x i1> %m
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr %base, <4 x i1> %mask)
+  call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr align 8 %base, <4 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
-define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %mask) {
+define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) {
 ; RV32-LABEL: compressstore_v8f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -1074,6 +1074,6 @@ define void @compressstore_v8f64(ptr align 8 %base, <8 x double> %v, <8 x i1> %m
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB11_9
 ; RV64-NEXT:    j .LBB11_10
-  call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr %base, <8 x i1> %mask)
+  call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr align 8 %base, <8 x i1> %mask)
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
index 177cbf67a95e0a..eb0096dbfba6de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
-define void @compressstore_v1i8(ptr align 2 %base, <1 x i8> %v, <1 x i1> %mask) {
+define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -19,7 +19,7 @@ define void @compressstore_v1i8(ptr align 2 %base, <1 x i8> %v, <1 x i1> %mask)
 }
 
 declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
-define void @compressstore_v2i8(ptr align 2 %base, <2 x i8> %v, <2 x i1> %mask) {
+define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -47,7 +47,7 @@ define void @compressstore_v2i8(ptr align 2 %base, <2 x i8> %v, <2 x i1> %mask)
 }
 
 declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
-define void @compressstore_v4i8(ptr align 2 %base, <4 x i8> %v, <4 x i1> %mask) {
+define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -95,7 +95,7 @@ define void @compressstore_v4i8(ptr align 2 %base, <4 x i8> %v, <4 x i1> %mask)
 }
 
 declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
-define void @compressstore_v8i8(ptr align 2 %base, <8 x i8> %v, <8 x i1> %mask) {
+define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -183,7 +183,7 @@ define void @compressstore_v8i8(ptr align 2 %base, <8 x i8> %v, <8 x i1> %mask)
 }
 
 declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
-define void @compressstore_v1i16(ptr align 2 %base, <1 x i16> %v, <1 x i1> %mask) {
+define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -194,12 +194,12 @@ define void @compressstore_v1i16(ptr align 2 %base, <1 x i16> %v, <1 x i1> %mask
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:  .LBB4_2: # %else
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr %base, <1 x i1> %mask)
+  call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr align 2 %base, <1 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
-define void @compressstore_v2i16(ptr align 2 %base, <2 x i16> %v, <2 x i1> %mask) {
+define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -222,12 +222,12 @@ define void @compressstore_v2i16(ptr align 2 %base, <2 x i16> %v, <2 x i1> %mask
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr %base, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr align 2 %base, <2 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
-define void @compressstore_v4i16(ptr align 2 %base, <4 x i16> %v, <4 x i1> %mask) {
+define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -270,12 +270,12 @@ define void @compressstore_v4i16(ptr align 2 %base, <4 x i16> %v, <4 x i1> %mask
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr %base, <4 x i1> %mask)
+  call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr align 2 %base, <4 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
-define void @compressstore_v8i16(ptr align 2 %base, <8 x i16> %v, <8 x i1> %mask) {
+define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -358,12 +358,12 @@ define void @compressstore_v8i16(ptr align 2 %base, <8 x i16> %v, <8 x i1> %mask
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr %base, <8 x i1> %mask)
+  call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr align 2 %base, <8 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
-define void @compressstore_v1i32(ptr align 4 %base, <1 x i32> %v, <1 x i1> %mask) {
+define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -374,12 +374,12 @@ define void @compressstore_v1i32(ptr align 4 %base, <1 x i32> %v, <1 x i1> %mask
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:  .LBB8_2: # %else
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr %base, <1 x i1> %mask)
+  call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr align 4 %base, <1 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
-define void @compressstore_v2i32(ptr align 4 %base, <2 x i32> %v, <2 x i1> %mask) {
+define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -402,12 +402,12 @@ define void @compressstore_v2i32(ptr align 4 %base, <2 x i32> %v, <2 x i1> %mask
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr %base, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr align 4 %base, <2 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
-define void @compressstore_v4i32(ptr align 4 %base, <4 x i32> %v, <4 x i1> %mask) {
+define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -450,12 +450,12 @@ define void @compressstore_v4i32(ptr align 4 %base, <4 x i32> %v, <4 x i1> %mask
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr %base, <4 x i1> %mask)
+  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr align 4 %base, <4 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
-define void @compressstore_v8i32(ptr align 4 %base, <8 x i32> %v, <8 x i1> %mask) {
+define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
 ; CHECK-LABEL: compressstore_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -542,12 +542,12 @@ define void @compressstore_v8i32(ptr align 4 %base, <8 x i32> %v, <8 x i1> %mask
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr %base, <8 x i1> %mask)
+  call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr align 4 %base, <8 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
-define void @compressstore_v1i64(ptr align 8 %base, <1 x i64> %v, <1 x i1> %mask) {
+define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) {
 ; RV32-LABEL: compressstore_v1i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -574,12 +574,12 @@ define void @compressstore_v1i64(ptr align 8 %base, <1 x i64> %v, <1 x i1> %mask
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:  .LBB12_2: # %else
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr %base, <1 x i1> %mask)
+  call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr align 8 %base, <1 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
-define void @compressstore_v2i64(ptr align 8 %base, <2 x i64> %v, <2 x i1> %mask) {
+define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) {
 ; RV32-LABEL: compressstore_v2i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -635,12 +635,12 @@ define void @compressstore_v2i64(ptr align 8 %base, <2 x i64> %v, <2 x i1> %mask
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr %base, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr align 8 %base, <2 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
-define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask) {
+define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) {
 ; RV32-LABEL: compressstore_v4i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -748,12 +748,12 @@ define void @compressstore_v4i64(ptr align 8 %base, <4 x i64> %v, <4 x i1> %mask
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
-  call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr %base, <4 x i1> %mask)
+  call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr align 8 %base, <4 x i1> %mask)
   ret void
 }
 
 declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
-define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask) {
+define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) {
 ; RV32-LABEL: compressstore_v8i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -981,6 +981,6 @@ define void @compressstore_v8i64(ptr align 8 %base, <8 x i64> %v, <8 x i1> %mask
 ; RV64-NEXT:    andi a1, a1, -128
 ; RV64-NEXT:    bnez a1, .LBB15_9
 ; RV64-NEXT:    j .LBB15_10
-  call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr %base, <8 x i1> %mask)
+  call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr align 8 %base, <8 x i1> %mask)
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
index 6385f1dee05ac0..48e820243c9578 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
 
 declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>)
-define <1 x half> @expandload_v1f16(ptr align 2 %base, <1 x half> %src0, <1 x i1> %mask) {
+define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -25,12 +25,12 @@ define <1 x half> @expandload_v1f16(ptr align 2 %base, <1 x half> %src0, <1 x i1
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:  .LBB0_2: # %else
 ; RV64-NEXT:    ret
-  %res = call <1 x half> @llvm.masked.expandload.v1f16(ptr %base, <1 x i1> %mask, <1 x half> %src0)
+  %res = call <1 x half> @llvm.masked.expandload.v1f16(ptr align 2 %base, <1 x i1> %mask, <1 x half> %src0)
   ret <1 x half>%res
 }
 
 declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>)
-define <2 x half> @expandload_v2f16(ptr align 2 %base, <2 x half> %src0, <2 x i1> %mask) {
+define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -80,12 +80,12 @@ define <2 x half> @expandload_v2f16(ptr align 2 %base, <2 x half> %src0, <2 x i1
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
 ; RV64-NEXT:    ret
-  %res = call <2 x half> @llvm.masked.expandload.v2f16(ptr %base, <2 x i1> %mask, <2 x half> %src0)
+  %res = call <2 x half> @llvm.masked.expandload.v2f16(ptr align 2 %base, <2 x i1> %mask, <2 x half> %src0)
   ret <2 x half>%res
 }
 
 declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>)
-define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1> %mask) {
+define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -179,12 +179,12 @@ define <4 x half> @expandload_v4f16(ptr align 2 %base, <4 x half> %src0, <4 x i1
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 3
 ; RV64-NEXT:    ret
-  %res = call <4 x half> @llvm.masked.expandload.v4f16(ptr %base, <4 x i1> %mask, <4 x half> %src0)
+  %res = call <4 x half> @llvm.masked.expandload.v4f16(ptr align 2 %base, <4 x i1> %mask, <4 x half> %src0)
   ret <4 x half>%res
 }
 
 declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>)
-define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1> %mask) {
+define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -366,12 +366,12 @@ define <8 x half> @expandload_v8f16(ptr align 2 %base, <8 x half> %src0, <8 x i1
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 7
 ; RV64-NEXT:    ret
-  %res = call <8 x half> @llvm.masked.expandload.v8f16(ptr %base, <8 x i1> %mask, <8 x half> %src0)
+  %res = call <8 x half> @llvm.masked.expandload.v8f16(ptr align 2 %base, <8 x i1> %mask, <8 x half> %src0)
   ret <8 x half>%res
 }
 
 declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>)
-define <1 x float> @expandload_v1f32(ptr align 4 %base, <1 x float> %src0, <1 x i1> %mask) {
+define <1 x float> @expandload_v1f32(ptr %base, <1 x float> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -393,12 +393,12 @@ define <1 x float> @expandload_v1f32(ptr align 4 %base, <1 x float> %src0, <1 x
 ; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:  .LBB4_2: # %else
 ; RV64-NEXT:    ret
-  %res = call <1 x float> @llvm.masked.expandload.v1f32(ptr %base, <1 x i1> %mask, <1 x float> %src0)
+  %res = call <1 x float> @llvm.masked.expandload.v1f32(ptr align 4 %base, <1 x i1> %mask, <1 x float> %src0)
   ret <1 x float>%res
 }
 
 declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>)
-define <2 x float> @expandload_v2f32(ptr align 4 %base, <2 x float> %src0, <2 x i1> %mask) {
+define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -448,12 +448,12 @@ define <2 x float> @expandload_v2f32(ptr align 4 %base, <2 x float> %src0, <2 x
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
 ; RV64-NEXT:    ret
-  %res = call <2 x float> @llvm.masked.expandload.v2f32(ptr %base, <2 x i1> %mask, <2 x float> %src0)
+  %res = call <2 x float> @llvm.masked.expandload.v2f32(ptr align 4 %base, <2 x i1> %mask, <2 x float> %src0)
   ret <2 x float>%res
 }
 
 declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>)
-define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x i1> %mask) {
+define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -547,12 +547,12 @@ define <4 x float> @expandload_v4f32(ptr align 4 %base, <4 x float> %src0, <4 x
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 3
 ; RV64-NEXT:    ret
-  %res = call <4 x float> @llvm.masked.expandload.v4f32(ptr %base, <4 x i1> %mask, <4 x float> %src0)
+  %res = call <4 x float> @llvm.masked.expandload.v4f32(ptr align 4 %base, <4 x i1> %mask, <4 x float> %src0)
   ret <4 x float>%res
 }
 
 declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>)
-define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x i1> %mask) {
+define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -734,12 +734,12 @@ define <8 x float> @expandload_v8f32(ptr align 4 %base, <8 x float> %src0, <8 x
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 7
 ; RV64-NEXT:    ret
-  %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr %base, <8 x i1> %mask, <8 x float> %src0)
+  %res = call <8 x float> @llvm.masked.expandload.v8f32(ptr align 4 %base, <8 x i1> %mask, <8 x float> %src0)
   ret <8 x float>%res
 }
 
 declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>)
-define <1 x double> @expandload_v1f64(ptr align 8 %base, <1 x double> %src0, <1 x i1> %mask) {
+define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -761,12 +761,12 @@ define <1 x double> @expandload_v1f64(ptr align 8 %base, <1 x double> %src0, <1
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:  .LBB8_2: # %else
 ; RV64-NEXT:    ret
-  %res = call <1 x double> @llvm.masked.expandload.v1f64(ptr %base, <1 x i1> %mask, <1 x double> %src0)
+  %res = call <1 x double> @llvm.masked.expandload.v1f64(ptr align 8 %base, <1 x i1> %mask, <1 x double> %src0)
   ret <1 x double>%res
 }
 
 declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>)
-define <2 x double> @expandload_v2f64(ptr align 8 %base, <2 x double> %src0, <2 x i1> %mask) {
+define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -816,12 +816,12 @@ define <2 x double> @expandload_v2f64(ptr align 8 %base, <2 x double> %src0, <2
 ; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
 ; RV64-NEXT:    ret
-  %res = call <2 x double> @llvm.masked.expandload.v2f64(ptr %base, <2 x i1> %mask, <2 x double> %src0)
+  %res = call <2 x double> @llvm.masked.expandload.v2f64(ptr align 8 %base, <2 x i1> %mask, <2 x double> %src0)
   ret <2 x double>%res
 }
 
 declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>)
-define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4 x i1> %mask) {
+define <4 x double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -915,12 +915,12 @@ define <4 x double> @expandload_v4f64(ptr align 8 %base, <4 x double> %src0, <4
 ; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vslideup.vi v8, v10, 3
 ; RV64-NEXT:    ret
-  %res = call <4 x double> @llvm.masked.expandload.v4f64(ptr %base, <4 x i1> %mask, <4 x double> %src0)
+  %res = call <4 x double> @llvm.masked.expandload.v4f64(ptr align 8 %base, <4 x i1> %mask, <4 x double> %src0)
   ret <4 x double>%res
 }
 
 declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>)
-define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8 x i1> %mask) {
+define <8 x double> @expandload_v8f64(ptr %base, <8 x double> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -1102,6 +1102,6 @@ define <8 x double> @expandload_v8f64(ptr align 8 %base, <8 x double> %src0, <8
 ; RV64-NEXT:    vfmv.s.f v12, fa5
 ; RV64-NEXT:    vslideup.vi v8, v12, 7
 ; RV64-NEXT:    ret
-  %res = call <8 x double> @llvm.masked.expandload.v8f64(ptr %base, <8 x i1> %mask, <8 x double> %src0)
+  %res = call <8 x double> @llvm.masked.expandload.v8f64(ptr align 8 %base, <8 x i1> %mask, <8 x double> %src0)
   ret <8 x double>%res
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
index ecea1e303ddf70..d6aca55fbde59d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -197,7 +197,7 @@ define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) {
 }
 
 declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
-define <1 x i16> @expandload_v1i16(ptr align 2 %base, <1 x i16> %src0, <1 x i1> %mask) {
+define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) {
 ; CHECK-LABEL: expandload_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -208,12 +208,12 @@ define <1 x i16> @expandload_v1i16(ptr align 2 %base, <1 x i16> %src0, <1 x i1>
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:  .LBB4_2: # %else
 ; CHECK-NEXT:    ret
-  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr %base, <1 x i1> %mask, <1 x i16> %src0)
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %src0)
   ret <1 x i16>%res
 }
 
 declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
-define <2 x i16> @expandload_v2i16(ptr align 2 %base, <2 x i16> %src0, <2 x i1> %mask) {
+define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) {
 ; CHECK-LABEL: expandload_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -238,12 +238,12 @@ define <2 x i16> @expandload_v2i16(ptr align 2 %base, <2 x i16> %src0, <2 x i1>
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
-  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr %base, <2 x i1> %mask, <2 x i16> %src0)
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %src0)
   ret <2 x i16>%res
 }
 
 declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
-define <4 x i16> @expandload_v4i16(ptr align 2 %base, <4 x i16> %src0, <4 x i1> %mask) {
+define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) {
 ; CHECK-LABEL: expandload_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -290,12 +290,12 @@ define <4 x i16> @expandload_v4i16(ptr align 2 %base, <4 x i16> %src0, <4 x i1>
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
-  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr %base, <4 x i1> %mask, <4 x i16> %src0)
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %src0)
   ret <4 x i16>%res
 }
 
 declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
-define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1> %mask) {
+define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) {
 ; CHECK-LABEL: expandload_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -386,12 +386,12 @@ define <8 x i16> @expandload_v8i16(ptr align 2 %base, <8 x i16> %src0, <8 x i1>
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 7
 ; CHECK-NEXT:    ret
-  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr %base, <8 x i1> %mask, <8 x i16> %src0)
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %src0)
   ret <8 x i16>%res
 }
 
 declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
-define <1 x i32> @expandload_v1i32(ptr align 4 %base, <1 x i32> %src0, <1 x i1> %mask) {
+define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) {
 ; CHECK-LABEL: expandload_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -402,12 +402,12 @@ define <1 x i32> @expandload_v1i32(ptr align 4 %base, <1 x i32> %src0, <1 x i1>
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:  .LBB8_2: # %else
 ; CHECK-NEXT:    ret
-  %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr %base, <1 x i1> %mask, <1 x i32> %src0)
+  %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %src0)
   ret <1 x i32>%res
 }
 
 declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
-define <2 x i32> @expandload_v2i32(ptr align 4 %base, <2 x i32> %src0, <2 x i1> %mask) {
+define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) {
 ; CHECK-LABEL: expandload_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -432,12 +432,12 @@ define <2 x i32> @expandload_v2i32(ptr align 4 %base, <2 x i32> %src0, <2 x i1>
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
-  %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr %base, <2 x i1> %mask, <2 x i32> %src0)
+  %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %src0)
   ret <2 x i32>%res
 }
 
 declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
-define <4 x i32> @expandload_v4i32(ptr align 4 %base, <4 x i32> %src0, <4 x i1> %mask) {
+define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) {
 ; CHECK-LABEL: expandload_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -484,12 +484,12 @@ define <4 x i32> @expandload_v4i32(ptr align 4 %base, <4 x i32> %src0, <4 x i1>
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
-  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr %base, <4 x i1> %mask, <4 x i32> %src0)
+  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %src0)
   ret <4 x i32>%res
 }
 
 declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
-define <8 x i32> @expandload_v8i32(ptr align 4 %base, <8 x i32> %src0, <8 x i1> %mask) {
+define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) {
 ; CHECK-LABEL: expandload_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -580,12 +580,12 @@ define <8 x i32> @expandload_v8i32(ptr align 4 %base, <8 x i32> %src0, <8 x i1>
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 7
 ; CHECK-NEXT:    ret
-  %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr %base, <8 x i1> %mask, <8 x i32> %src0)
+  %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %src0)
   ret <8 x i32>%res
 }
 
 declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
-define <1 x i64> @expandload_v1i64(ptr align 8 %base, <1 x i64> %src0, <1 x i1> %mask) {
+define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) {
 ; RV32-LABEL: expandload_v1i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
@@ -615,12 +615,12 @@ define <1 x i64> @expandload_v1i64(ptr align 8 %base, <1 x i64> %src0, <1 x i1>
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:  .LBB12_2: # %else
 ; RV64-NEXT:    ret
-  %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr %base, <1 x i1> %mask, <1 x i64> %src0)
+  %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %src0)
   ret <1 x i64>%res
 }
 
 declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
-define <2 x i64> @expandload_v2i64(ptr align 8 %base, <2 x i64> %src0, <2 x i1> %mask) {
+define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) {
 ; RV32-LABEL: expandload_v2i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -675,12 +675,12 @@ define <2 x i64> @expandload_v2i64(ptr align 8 %base, <2 x i64> %src0, <2 x i1>
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
 ; RV64-NEXT:    ret
-  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr %base, <2 x i1> %mask, <2 x i64> %src0)
+  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %src0)
   ret <2 x i64>%res
 }
 
 declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
-define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1> %mask) {
+define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) {
 ; RV32-LABEL: expandload_v4i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -785,12 +785,12 @@ define <4 x i64> @expandload_v4i64(ptr align 8 %base, <4 x i64> %src0, <4 x i1>
 ; RV64-NEXT:    vmv.s.x v10, a0
 ; RV64-NEXT:    vslideup.vi v8, v10, 3
 ; RV64-NEXT:    ret
-  %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr %base, <4 x i1> %mask, <4 x i64> %src0)
+  %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %src0)
   ret <4 x i64>%res
 }
 
 declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
-define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1> %mask) {
+define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) {
 ; RV32-LABEL: expandload_v8i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
@@ -995,6 +995,6 @@ define <8 x i64> @expandload_v8i64(ptr align 8 %base, <8 x i64> %src0, <8 x i1>
 ; RV64-NEXT:    vmv.s.x v12, a0
 ; RV64-NEXT:    vslideup.vi v8, v12, 7
 ; RV64-NEXT:    ret
-  %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr %base, <8 x i1> %mask, <8 x i64> %src0)
+  %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %src0)
   ret <8 x i64>%res
 }


