[llvm] [RISCV] Support llvm.masked.expandload intrinsic (PR #101954)
Pengcheng Wang via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 5 03:19:32 PDT 2024
https://github.com/wangpc-pp created https://github.com/llvm/llvm-project/pull/101954
We can use `viota+vrgather` to synthesize `vdecompress` and lower an
expanding load to `vcpop+load+vdecompress`.
Fixes #101914
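For example, for a <4 x i32> expanding load (a sketch drawn from the v4i32 test added below; register allocation and exact vsetvli choices may vary):

  %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %passthru)

this lowers roughly to the following, assuming the mask is in v0, the passthru in v8, and the base pointer in a0:

  vsetivli zero, 4, e8, mf4, ta, ma
  vcpop.m a1, v0                    # number of active mask bits
  vsetvli zero, a1, e32, m1, ta, ma
  vle32.v v9, (a0)                  # contiguous load of just the active elements
  vsetivli zero, 4, e32, m1, ta, mu
  viota.m v10, v0                   # prefix sum of the mask = per-lane source indices
  vrgather.vv v8, v9, v10, v0.t     # expand into the active lanes, keeping passthru elsewhere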
From aa662d73dd6a299e3a8412b75ef90e63c9793ee1 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Mon, 5 Aug 2024 17:30:35 +0800
Subject: [PATCH] [RISCV] Support llvm.masked.expandload intrinsic
We can use `viota+vrgather` to synthesize `vdecompress` and lower an
expanding load to `vcpop+load+vdecompress`.
Fixes #101914
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 37 +-
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 10 +
.../Target/RISCV/RISCVTargetTransformInfo.h | 2 +
llvm/test/CodeGen/RISCV/rvv/expandload.ll | 1541 +++++++++++++++++
.../RISCV/rvv/fixed-vectors-expandload-fp.ll | 1134 ++----------
.../RISCV/rvv/fixed-vectors-expandload-int.ll | 996 ++---------
6 files changed, 1874 insertions(+), 1846 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/expandload.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9ee60b9db2837..67908c480fed3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10732,18 +10732,21 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
SDValue Chain = MemSD->getChain();
SDValue BasePtr = MemSD->getBasePtr();
- SDValue Mask, PassThru, VL;
+ SDValue Mask, PassThru, LoadVL;
+ bool IsExpandingLoad = false;
if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
Mask = VPLoad->getMask();
PassThru = DAG.getUNDEF(VT);
- VL = VPLoad->getVectorLength();
+ LoadVL = VPLoad->getVectorLength();
} else {
const auto *MLoad = cast<MaskedLoadSDNode>(Op);
Mask = MLoad->getMask();
PassThru = MLoad->getPassThru();
+ IsExpandingLoad = MLoad->isExpandingLoad();
}
- bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+ bool IsUnmasked =
+ ISD::isConstantSplatVectorAllOnes(Mask.getNode()) || IsExpandingLoad;
MVT XLenVT = Subtarget.getXLenVT();
@@ -10751,14 +10754,22 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
if (VT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(VT);
PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
- if (!IsUnmasked) {
+ if (!IsUnmasked || IsExpandingLoad) {
MVT MaskVT = getMaskTypeFor(ContainerVT);
Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
}
}
- if (!VL)
- VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+ if (!LoadVL)
+ LoadVL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+ SDValue ExpandingVL;
+ if (IsExpandingLoad) {
+ ExpandingVL = LoadVL;
+ LoadVL = DAG.getNode(
+ RISCVISD::VCPOP_VL, DL, XLenVT, Mask,
+ getAllOnesMask(Mask.getSimpleValueType(), LoadVL, DL, DAG), LoadVL);
+ }
unsigned IntID =
IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
@@ -10770,7 +10781,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
Ops.push_back(BasePtr);
if (!IsUnmasked)
Ops.push_back(Mask);
- Ops.push_back(VL);
+ Ops.push_back(LoadVL);
if (!IsUnmasked)
Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
@@ -10779,6 +10790,18 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
SDValue Result =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
Chain = Result.getValue(1);
+ if (IsExpandingLoad) {
+ MVT IotaVT = ContainerVT;
+ if (ContainerVT.isFloatingPoint())
+ IotaVT = ContainerVT.changeVectorElementTypeToInteger();
+
+ SDValue Iota =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, IotaVT,
+ DAG.getConstant(Intrinsic::riscv_viota, DL, XLenVT),
+ DAG.getUNDEF(IotaVT), Mask, ExpandingVL);
+ Result = DAG.getNode(RISCVISD::VRGATHER_VV_VL, DL, ContainerVT, Result,
+ Iota, PassThru, Mask, ExpandingVL);
+ }
if (VT.isFixedLengthVector())
Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 4cd904c039a98..41b39accbf027 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1966,6 +1966,16 @@ bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
+bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) {
+ auto *VTy = dyn_cast<VectorType>(DataTy);
+ if (!VTy || VTy->isScalableTy())
+ return false;
+
+ if (!isLegalMaskedLoadStore(DataTy, Alignment))
+ return false;
+ return true;
+}
+
bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) {
auto *VTy = dyn_cast<VectorType>(DataTy);
if (!VTy || VTy->isScalableTy())
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 9c37a4f6ec2d0..192bb35613aad 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -281,6 +281,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
}
+ bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
+
bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);
bool isVScaleKnownToBeAPowerOfTwo() const {
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
new file mode 100644
index 0000000000000..9380b52daf4b7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -0,0 +1,1541 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV64
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV32
+
+; Expanding load for i8 type
+
+define <1 x i8> @test_expandload_v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v1i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v1i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr align 1 %base, <1 x i1> %mask, <1 x i8> %passthru)
+ ret <1 x i8> %res
+}
+
+define <2 x i8> @test_expandload_v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v2i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v2i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr align 1 %base, <2 x i1> %mask, <2 x i8> %passthru)
+ ret <2 x i8> %res
+}
+
+define <4 x i8> @test_expandload_v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v4i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v4i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr align 1 %base, <4 x i1> %mask, <4 x i8> %passthru)
+ ret <4 x i8> %res
+}
+
+define <8 x i8> @test_expandload_v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v8i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v8i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr align 1 %base, <8 x i1> %mask, <8 x i8> %passthru)
+ ret <8 x i8> %res
+}
+
+define <16 x i8> @test_expandload_v16i8(ptr %base, <16 x i1> %mask, <16 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v16i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v16i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <16 x i8> @llvm.masked.expandload.v16i8(ptr align 1 %base, <16 x i1> %mask, <16 x i8> %passthru)
+ ret <16 x i8> %res
+}
+
+define <32 x i8> @test_expandload_v32i8(ptr %base, <32 x i1> %mask, <32 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v32i8:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; RV64-NEXT: vle8.v v10, (a0)
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v32i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; RV32-NEXT: vle8.v v10, (a0)
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV32-NEXT: ret
+ %res = call <32 x i8> @llvm.masked.expandload.v32i8(ptr align 1 %base, <32 x i1> %mask, <32 x i8> %passthru)
+ ret <32 x i8> %res
+}
+
+define <64 x i8> @test_expandload_v64i8(ptr %base, <64 x i1> %mask, <64 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v64i8:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; RV64-NEXT: vle8.v v12, (a0)
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v64i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; RV32-NEXT: vle8.v v12, (a0)
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV32-NEXT: ret
+ %res = call <64 x i8> @llvm.masked.expandload.v64i8(ptr align 1 %base, <64 x i1> %mask, <64 x i8> %passthru)
+ ret <64 x i8> %res
+}
+
+define <128 x i8> @test_expandload_v128i8(ptr %base, <128 x i1> %mask, <128 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v128i8:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 128
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v16, (a0)
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; RV64-NEXT: viota.m v24, v0
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v128i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 128
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v16, (a0)
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; RV32-NEXT: viota.m v24, v0
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: ret
+ %res = call <128 x i8> @llvm.masked.expandload.v128i8(ptr align 1 %base, <128 x i1> %mask, <128 x i8> %passthru)
+ ret <128 x i8> %res
+}
+
+define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v256i8:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 5
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: li a3, 24
+; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v7, v8
+; RV64-NEXT: li a2, 128
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v8, (a1)
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v0, 1
+; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcpop.m a4, v0
+; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v24, (a0)
+; RV64-NEXT: csrr a4, vlenb
+; RV64-NEXT: slli a4, a4, 4
+; RV64-NEXT: add a4, sp, a4
+; RV64-NEXT: addi a4, a4, 16
+; RV64-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcpop.m a4, v7
+; RV64-NEXT: cpop a3, a3
+; RV64-NEXT: cpop a1, a1
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: viota.m v16, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t
+; RV64-NEXT: vmv.v.v v16, v8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v256i8:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v7, v8
+; RV32-NEXT: li a2, 128
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v0, 1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsrl.vx v10, v9, a1
+; RV32-NEXT: vmv.x.s a3, v10
+; RV32-NEXT: vsrl.vx v10, v0, a1
+; RV32-NEXT: vmv.x.s a1, v10
+; RV32-NEXT: vmv.x.s a4, v9
+; RV32-NEXT: vmv.x.s a5, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcpop.m a6, v0
+; RV32-NEXT: vsetvli zero, a6, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: csrr a6, vlenb
+; RV32-NEXT: slli a6, a6, 4
+; RV32-NEXT: add a6, sp, a6
+; RV32-NEXT: addi a6, a6, 16
+; RV32-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: cpop a5, a5
+; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: cpop a4, a4
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: add a1, a1, a3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcpop.m a1, v7
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v8, v24, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: viota.m v16, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v16, v24, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+ %res = call <256 x i8> @llvm.masked.expandload.v256i8(ptr align 1 %base, <256 x i1> %mask, <256 x i8> %passthru)
+ ret <256 x i8> %res
+}
+
+declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
+declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
+declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.expandload.v16i8(ptr, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.expandload.v32i8(ptr, <32 x i1>, <32 x i8>)
+declare <64 x i8> @llvm.masked.expandload.v64i8(ptr, <64 x i1>, <64 x i8>)
+declare <128 x i8> @llvm.masked.expandload.v128i8(ptr, <128 x i1>, <128 x i8>)
+declare <256 x i8> @llvm.masked.expandload.v256i8(ptr, <256 x i1>, <256 x i8>)
+
+; Expanding load for i16 type
+
+define <1 x i16> @test_expandload_v1i16(ptr %base, <1 x i1> %mask, <1 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v1i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v1i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %passthru)
+ ret <1 x i16> %res
+}
+
+define <2 x i16> @test_expandload_v2i16(ptr %base, <2 x i1> %mask, <2 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v2i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v2i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %passthru)
+ ret <2 x i16> %res
+}
+
+define <4 x i16> @test_expandload_v4i16(ptr %base, <4 x i1> %mask, <4 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %passthru)
+ ret <4 x i16> %res
+}
+
+define <8 x i16> @test_expandload_v8i16(ptr %base, <8 x i1> %mask, <8 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v8i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v8i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %passthru)
+ ret <8 x i16> %res
+}
+
+define <16 x i16> @test_expandload_v16i16(ptr %base, <16 x i1> %mask, <16 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v16i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v10, (a0)
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v16i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV32-NEXT: ret
+ %res = call <16 x i16> @llvm.masked.expandload.v16i16(ptr align 2 %base, <16 x i1> %mask, <16 x i16> %passthru)
+ ret <16 x i16> %res
+}
+
+define <32 x i16> @test_expandload_v32i16(ptr %base, <32 x i1> %mask, <32 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v32i16:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV64-NEXT: vle16.v v12, (a0)
+; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v32i16:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v12, (a0)
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV32-NEXT: ret
+ %res = call <32 x i16> @llvm.masked.expandload.v32i16(ptr align 2 %base, <32 x i1> %mask, <32 x i16> %passthru)
+ ret <32 x i16> %res
+}
+
+define <64 x i16> @test_expandload_v64i16(ptr %base, <64 x i1> %mask, <64 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v64i16:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vle16.v v16, (a0)
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT: viota.m v24, v0
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v64i16:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT: vle16.v v16, (a0)
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT: viota.m v24, v0
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: ret
+ %res = call <64 x i16> @llvm.masked.expandload.v64i16(ptr align 2 %base, <64 x i1> %mask, <64 x i16> %passthru)
+ ret <64 x i16> %res
+}
+
+define <128 x i16> @test_expandload_v128i16(ptr %base, <128 x i1> %mask, <128 x i16> %passthru) {
+; RV64-LABEL: test_expandload_v128i16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 40
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vle16.v v16, (a0)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: li a3, 24
+; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v7, v0, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcpop.m a2, v7
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: cpop a3, a3
+; RV64-NEXT: slli a3, a3, 1
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV64-NEXT: vle16.v v16, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: viota.m v16, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 40
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v128i16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v7, v0, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcpop.m a2, v7
+; RV32-NEXT: li a3, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vx v25, v0, a3
+; RV32-NEXT: vmv.x.s a3, v25
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: vmv.x.s a4, v0
+; RV32-NEXT: cpop a4, a4
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: slli a3, a3, 1
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT: viota.m v8, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: viota.m v16, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: vmv.v.v v16, v8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+ %res = call <128 x i16> @llvm.masked.expandload.v128i16(ptr align 2 %base, <128 x i1> %mask, <128 x i16> %passthru)
+ ret <128 x i16> %res
+}
+
+declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
+declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
+declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
+declare <16 x i16> @llvm.masked.expandload.v16i16(ptr, <16 x i1>, <16 x i16>)
+declare <32 x i16> @llvm.masked.expandload.v32i16(ptr, <32 x i1>, <32 x i16>)
+declare <64 x i16> @llvm.masked.expandload.v64i16(ptr, <64 x i1>, <64 x i16>)
+declare <128 x i16> @llvm.masked.expandload.v128i16(ptr, <128 x i1>, <128 x i16>)
+
+; Expanding load for i32 type
+
+define <1 x i32> @test_expandload_v1i32(ptr %base, <1 x i1> %mask, <1 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v1i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vle32.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v1i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vle32.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %passthru)
+ ret <1 x i32> %res
+}
+
+define <2 x i32> @test_expandload_v2i32(ptr %base, <2 x i1> %mask, <2 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v2i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vle32.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v2i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vle32.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %passthru)
+ ret <2 x i32> %res
+}
+
+define <4 x i32> @test_expandload_v4i32(ptr %base, <4 x i1> %mask, <4 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v4i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT: vle32.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v4i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT: vle32.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %passthru)
+ ret <4 x i32> %res
+}
+
+define <8 x i32> @test_expandload_v8i32(ptr %base, <8 x i1> %mask, <8 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT: vle32.v v10, (a0)
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT: vle32.v v10, (a0)
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV32-NEXT: ret
+ %res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %passthru)
+ ret <8 x i32> %res
+}
+
+define <16 x i32> @test_expandload_v16i32(ptr %base, <16 x i1> %mask, <16 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v16i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vle32.v v12, (a0)
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v16i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT: vle32.v v12, (a0)
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV32-NEXT: ret
+ %res = call <16 x i32> @llvm.masked.expandload.v16i32(ptr align 4 %base, <16 x i1> %mask, <16 x i32> %passthru)
+ ret <16 x i32> %res
+}
+
+define <32 x i32> @test_expandload_v32i32(ptr %base, <32 x i1> %mask, <32 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v32i32:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV64-NEXT: vle32.v v16, (a0)
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT: viota.m v24, v0
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v32i32:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vle32.v v16, (a0)
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT: viota.m v24, v0
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: ret
+ %res = call <32 x i32> @llvm.masked.expandload.v32i32(ptr align 4 %base, <32 x i1> %mask, <32 x i32> %passthru)
+ ret <32 x i32> %res
+}
+
+define <64 x i32> @test_expandload_v64i32(ptr %base, <64 x i1> %mask, <64 x i32> %passthru) {
+; RV64-LABEL: test_expandload_v64i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 40
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV64-NEXT: vle32.v v16, (a0)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: li a3, 24
+; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v7, v0, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vcpop.m a2, v7
+; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: cpopw a3, a3
+; RV64-NEXT: slli a3, a3, 2
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV64-NEXT: vle32.v v16, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: viota.m v16, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 40
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v64i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vle32.v v16, (a0)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v7, v0, 4
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vcpop.m a2, v7
+; RV32-NEXT: vmv.x.s a3, v0
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: slli a3, a3, 2
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vle32.v v16, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: viota.m v16, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v16, v24, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+ %res = call <64 x i32> @llvm.masked.expandload.v64i32(ptr align 4 %base, <64 x i1> %mask, <64 x i32> %passthru)
+ ret <64 x i32> %res
+}
+
+declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
+declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
+declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
+declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
+declare <16 x i32> @llvm.masked.expandload.v16i32(ptr, <16 x i1>, <16 x i32>)
+declare <32 x i32> @llvm.masked.expandload.v32i32(ptr, <32 x i1>, <32 x i32>)
+declare <64 x i32> @llvm.masked.expandload.v64i32(ptr, <64 x i1>, <64 x i32>)
+
+; Expanding load for i64 type
+
+define <1 x i64> @test_expandload_v1i64(ptr %base, <1 x i1> %mask, <1 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %passthru)
+ ret <1 x i64> %res
+}
+
+define <2 x i64> @test_expandload_v2i64(ptr %base, <2 x i1> %mask, <2 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %passthru)
+ ret <2 x i64> %res
+}
+
+define <4 x i64> @test_expandload_v4i64(ptr %base, <4 x i1> %mask, <4 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v10, (a0)
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v10, (a0)
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV32-NEXT: ret
+ %res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %passthru)
+ ret <4 x i64> %res
+}
+
+define <8 x i64> @test_expandload_v8i64(ptr %base, <8 x i1> %mask, <8 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v12, (a0)
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT: vle64.v v12, (a0)
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV32-NEXT: ret
+ %res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %passthru)
+ ret <8 x i64> %res
+}
+
+define <16 x i64> @test_expandload_v16i64(ptr %base, <16 x i1> %mask, <16 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: viota.m v24, v0
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v16, (a0)
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: viota.m v24, v0
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: ret
+ %res = call <16 x i64> @llvm.masked.expandload.v16i64(ptr align 8 %base, <16 x i1> %mask, <16 x i64> %passthru)
+ ret <16 x i64> %res
+}
+
+define <32 x i64> @test_expandload_v32i64(ptr %base, <32 x i1> %mask, <32 x i64> %passthru) {
+; RV64-LABEL: test_expandload_v32i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 40
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 24
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.x.s a1, v0
+; RV64-NEXT: zext.h a1, a1
+; RV64-NEXT: cpopw a1, a1
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v7, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v7
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: viota.m v16, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 40
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vle64.v v16, (a0)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.x.s a1, v0
+; RV32-NEXT: zext.h a1, a1
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v7, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v7
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v16, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: viota.m v16, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v16, v24, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+ %res = call <32 x i64> @llvm.masked.expandload.v32i64(ptr align 8 %base, <32 x i1> %mask, <32 x i64> %passthru)
+ ret <32 x i64> %res
+}
+
+declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
+declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
+declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
+declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
+declare <16 x i64> @llvm.masked.expandload.v16i64(ptr, <16 x i1>, <16 x i64>)
+declare <32 x i64> @llvm.masked.expandload.v32i64(ptr, <32 x i1>, <32 x i64>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
index 8b31166e313de..b532507d32e3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll
@@ -6,24 +6,24 @@ declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>)
define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) {
; RV32-LABEL: expandload_v1f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB0_2
-; RV32-NEXT: # %bb.1: # %cond.load
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: .LBB0_2: # %else
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v1f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB0_2
-; RV64-NEXT: # %bb.1: # %cond.load
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: .LBB0_2: # %else
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <1 x half> @llvm.masked.expandload.v1f16(ptr align 2 %base, <1 x i1> %mask, <1 x half> %src0)
ret <1 x half>%res
@@ -33,54 +33,24 @@ declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>)
define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) {
; RV32-LABEL: expandload_v2f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB1_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB1_4
-; RV32-NEXT: .LBB1_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB1_3: # %cond.load
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB1_2
-; RV32-NEXT: .LBB1_4: # %cond.load1
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v2f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB1_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB1_4
-; RV64-NEXT: .LBB1_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB1_3: # %cond.load
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB1_2
-; RV64-NEXT: .LBB1_4: # %cond.load1
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <2 x half> @llvm.masked.expandload.v2f16(ptr align 2 %base, <2 x i1> %mask, <2 x half> %src0)
ret <2 x half>%res
@@ -90,98 +60,24 @@ declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>)
define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) {
; RV32-LABEL: expandload_v4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB2_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB2_6
-; RV32-NEXT: .LBB2_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB2_7
-; RV32-NEXT: .LBB2_3: # %else6
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB2_8
-; RV32-NEXT: .LBB2_4: # %else10
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB2_5: # %cond.load
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB2_2
-; RV32-NEXT: .LBB2_6: # %cond.load1
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB2_3
-; RV32-NEXT: .LBB2_7: # %cond.load5
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 2
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB2_4
-; RV32-NEXT: .LBB2_8: # %cond.load9
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 3
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB2_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB2_6
-; RV64-NEXT: .LBB2_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB2_7
-; RV64-NEXT: .LBB2_3: # %else6
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB2_8
-; RV64-NEXT: .LBB2_4: # %else10
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB2_5: # %cond.load
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB2_2
-; RV64-NEXT: .LBB2_6: # %cond.load1
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB2_3
-; RV64-NEXT: .LBB2_7: # %cond.load5
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 2
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB2_4
-; RV64-NEXT: .LBB2_8: # %cond.load9
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 3
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <4 x half> @llvm.masked.expandload.v4f16(ptr align 2 %base, <4 x i1> %mask, <4 x half> %src0)
ret <4 x half>%res
@@ -191,186 +87,24 @@ declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>)
define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) {
; RV32-LABEL: expandload_v8f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB3_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB3_10
-; RV32-NEXT: .LBB3_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB3_11
-; RV32-NEXT: .LBB3_3: # %else6
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB3_12
-; RV32-NEXT: .LBB3_4: # %else10
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB3_13
-; RV32-NEXT: .LBB3_5: # %else14
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB3_14
-; RV32-NEXT: .LBB3_6: # %else18
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB3_15
-; RV32-NEXT: .LBB3_7: # %else22
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB3_16
-; RV32-NEXT: .LBB3_8: # %else26
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB3_9: # %cond.load
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB3_2
-; RV32-NEXT: .LBB3_10: # %cond.load1
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vsetivli zero, 2, e16, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB3_3
-; RV32-NEXT: .LBB3_11: # %cond.load5
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 3, e16, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 2
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB3_4
-; RV32-NEXT: .LBB3_12: # %cond.load9
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 3
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB3_5
-; RV32-NEXT: .LBB3_13: # %cond.load13
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 5, e16, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 4
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB3_6
-; RV32-NEXT: .LBB3_14: # %cond.load17
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 5
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB3_7
-; RV32-NEXT: .LBB3_15: # %cond.load21
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 6
-; RV32-NEXT: addi a0, a0, 2
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB3_8
-; RV32-NEXT: .LBB3_16: # %cond.load25
-; RV32-NEXT: flh fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 7
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v8f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB3_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB3_10
-; RV64-NEXT: .LBB3_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB3_11
-; RV64-NEXT: .LBB3_3: # %else6
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB3_12
-; RV64-NEXT: .LBB3_4: # %else10
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB3_13
-; RV64-NEXT: .LBB3_5: # %else14
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB3_14
-; RV64-NEXT: .LBB3_6: # %else18
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB3_15
-; RV64-NEXT: .LBB3_7: # %else22
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB3_16
-; RV64-NEXT: .LBB3_8: # %else26
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB3_9: # %cond.load
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB3_2
-; RV64-NEXT: .LBB3_10: # %cond.load1
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vsetivli zero, 2, e16, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB3_3
-; RV64-NEXT: .LBB3_11: # %cond.load5
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e16, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 2
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB3_4
-; RV64-NEXT: .LBB3_12: # %cond.load9
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 3
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB3_5
-; RV64-NEXT: .LBB3_13: # %cond.load13
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 5, e16, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 4
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB3_6
-; RV64-NEXT: .LBB3_14: # %cond.load17
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 5
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB3_7
-; RV64-NEXT: .LBB3_15: # %cond.load21
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 6
-; RV64-NEXT: addi a0, a0, 2
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB3_8
-; RV64-NEXT: .LBB3_16: # %cond.load25
-; RV64-NEXT: flh fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 7
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <8 x half> @llvm.masked.expandload.v8f16(ptr align 2 %base, <8 x i1> %mask, <8 x half> %src0)
ret <8 x half>%res
@@ -380,24 +114,24 @@ declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>)
define <1 x float> @expandload_v1f32(ptr %base, <1 x float> %src0, <1 x i1> %mask) {
; RV32-LABEL: expandload_v1f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB4_2
-; RV32-NEXT: # %bb.1: # %cond.load
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: .LBB4_2: # %else
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vle32.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v1f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB4_2
-; RV64-NEXT: # %bb.1: # %cond.load
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: .LBB4_2: # %else
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vle32.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <1 x float> @llvm.masked.expandload.v1f32(ptr align 4 %base, <1 x i1> %mask, <1 x float> %src0)
ret <1 x float>%res
@@ -407,54 +141,24 @@ declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>)
define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mask) {
; RV32-LABEL: expandload_v2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB5_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB5_4
-; RV32-NEXT: .LBB5_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB5_3: # %cond.load
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB5_2
-; RV32-NEXT: .LBB5_4: # %cond.load1
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT: vle32.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v2f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB5_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB5_4
-; RV64-NEXT: .LBB5_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB5_3: # %cond.load
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB5_2
-; RV64-NEXT: .LBB5_4: # %cond.load1
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT: vle32.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <2 x float> @llvm.masked.expandload.v2f32(ptr align 4 %base, <2 x i1> %mask, <2 x float> %src0)
ret <2 x float>%res
@@ -464,98 +168,24 @@ declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>)
define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mask) {
; RV32-LABEL: expandload_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB6_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB6_6
-; RV32-NEXT: .LBB6_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB6_7
-; RV32-NEXT: .LBB6_3: # %else6
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB6_8
-; RV32-NEXT: .LBB6_4: # %else10
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB6_5: # %cond.load
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB6_2
-; RV32-NEXT: .LBB6_6: # %cond.load1
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB6_3
-; RV32-NEXT: .LBB6_7: # %cond.load5
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 2
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB6_4
-; RV32-NEXT: .LBB6_8: # %cond.load9
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vslideup.vi v8, v9, 3
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT: vle32.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB6_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB6_6
-; RV64-NEXT: .LBB6_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB6_7
-; RV64-NEXT: .LBB6_3: # %else6
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB6_8
-; RV64-NEXT: .LBB6_4: # %else10
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB6_5: # %cond.load
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB6_2
-; RV64-NEXT: .LBB6_6: # %cond.load1
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB6_3
-; RV64-NEXT: .LBB6_7: # %cond.load5
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 2
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB6_4
-; RV64-NEXT: .LBB6_8: # %cond.load9
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vslideup.vi v8, v9, 3
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT: vle32.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <4 x float> @llvm.masked.expandload.v4f32(ptr align 4 %base, <4 x i1> %mask, <4 x float> %src0)
ret <4 x float>%res
@@ -565,186 +195,24 @@ declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>)
define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mask) {
; RV32-LABEL: expandload_v8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB7_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB7_10
-; RV32-NEXT: .LBB7_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB7_11
-; RV32-NEXT: .LBB7_3: # %else6
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB7_12
-; RV32-NEXT: .LBB7_4: # %else10
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB7_13
-; RV32-NEXT: .LBB7_5: # %else14
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB7_14
-; RV32-NEXT: .LBB7_6: # %else18
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB7_15
-; RV32-NEXT: .LBB7_7: # %else22
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB7_16
-; RV32-NEXT: .LBB7_8: # %else26
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB7_9: # %cond.load
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB7_2
-; RV32-NEXT: .LBB7_10: # %cond.load1
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v10, 1
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB7_3
-; RV32-NEXT: .LBB7_11: # %cond.load5
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB7_4
-; RV32-NEXT: .LBB7_12: # %cond.load9
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 4, e32, m1, tu, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 3
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB7_5
-; RV32-NEXT: .LBB7_13: # %cond.load13
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 4
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB7_6
-; RV32-NEXT: .LBB7_14: # %cond.load17
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 5
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB7_7
-; RV32-NEXT: .LBB7_15: # %cond.load21
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 6
-; RV32-NEXT: addi a0, a0, 4
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB7_8
-; RV32-NEXT: .LBB7_16: # %cond.load25
-; RV32-NEXT: flw fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 7
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT: vle32.v v10, (a0)
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB7_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB7_10
-; RV64-NEXT: .LBB7_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB7_11
-; RV64-NEXT: .LBB7_3: # %else6
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB7_12
-; RV64-NEXT: .LBB7_4: # %else10
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB7_13
-; RV64-NEXT: .LBB7_5: # %else14
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB7_14
-; RV64-NEXT: .LBB7_6: # %else18
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB7_15
-; RV64-NEXT: .LBB7_7: # %else22
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB7_16
-; RV64-NEXT: .LBB7_8: # %else26
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB7_9: # %cond.load
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB7_2
-; RV64-NEXT: .LBB7_10: # %cond.load1
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v10, 1
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB7_3
-; RV64-NEXT: .LBB7_11: # %cond.load5
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 2
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB7_4
-; RV64-NEXT: .LBB7_12: # %cond.load9
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e32, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 3
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB7_5
-; RV64-NEXT: .LBB7_13: # %cond.load13
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 4
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB7_6
-; RV64-NEXT: .LBB7_14: # %cond.load17
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 5
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB7_7
-; RV64-NEXT: .LBB7_15: # %cond.load21
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 6
-; RV64-NEXT: addi a0, a0, 4
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB7_8
-; RV64-NEXT: .LBB7_16: # %cond.load25
-; RV64-NEXT: flw fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 7
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT: vle32.v v10, (a0)
+; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
; RV64-NEXT: ret
%res = call <8 x float> @llvm.masked.expandload.v8f32(ptr align 4 %base, <8 x i1> %mask, <8 x float> %src0)
ret <8 x float>%res
@@ -754,24 +222,24 @@ declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>)
define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %mask) {
; RV32-LABEL: expandload_v1f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB8_2
-; RV32-NEXT: # %bb.1: # %cond.load
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: .LBB8_2: # %else
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v1f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB8_2
-; RV64-NEXT: # %bb.1: # %cond.load
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: .LBB8_2: # %else
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <1 x double> @llvm.masked.expandload.v1f64(ptr align 8 %base, <1 x i1> %mask, <1 x double> %src0)
ret <1 x double>%res
@@ -781,54 +249,24 @@ declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>)
define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %mask) {
; RV32-LABEL: expandload_v2f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB9_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB9_4
-; RV32-NEXT: .LBB9_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB9_3: # %cond.load
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB9_2
-; RV32-NEXT: .LBB9_4: # %cond.load1
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vfmv.s.f v9, fa5
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB9_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB9_4
-; RV64-NEXT: .LBB9_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB9_3: # %cond.load
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB9_2
-; RV64-NEXT: .LBB9_4: # %cond.load1
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vfmv.s.f v9, fa5
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
; RV64-NEXT: ret
%res = call <2 x double> @llvm.masked.expandload.v2f64(ptr align 8 %base, <2 x i1> %mask, <2 x double> %src0)
ret <2 x double>%res
@@ -838,98 +276,24 @@ declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>)
define <4 x double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %mask) {
; RV32-LABEL: expandload_v4f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB10_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB10_6
-; RV32-NEXT: .LBB10_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB10_7
-; RV32-NEXT: .LBB10_3: # %else6
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB10_8
-; RV32-NEXT: .LBB10_4: # %else10
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB10_5: # %cond.load
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB10_2
-; RV32-NEXT: .LBB10_6: # %cond.load1
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v10, 1
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB10_3
-; RV32-NEXT: .LBB10_7: # %cond.load5
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB10_4
-; RV32-NEXT: .LBB10_8: # %cond.load9
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vfmv.s.f v10, fa5
-; RV32-NEXT: vslideup.vi v8, v10, 3
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v10, (a0)
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v4f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB10_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB10_6
-; RV64-NEXT: .LBB10_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB10_7
-; RV64-NEXT: .LBB10_3: # %else6
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB10_8
-; RV64-NEXT: .LBB10_4: # %else10
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB10_5: # %cond.load
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB10_2
-; RV64-NEXT: .LBB10_6: # %cond.load1
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v10, 1
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB10_3
-; RV64-NEXT: .LBB10_7: # %cond.load5
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB10_4
-; RV64-NEXT: .LBB10_8: # %cond.load9
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vfmv.s.f v10, fa5
-; RV64-NEXT: vslideup.vi v8, v10, 3
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v10, (a0)
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
; RV64-NEXT: ret
%res = call <4 x double> @llvm.masked.expandload.v4f64(ptr align 8 %base, <4 x i1> %mask, <4 x double> %src0)
ret <4 x double>%res
@@ -939,186 +303,24 @@ declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>)
define <8 x double> @expandload_v8f64(ptr %base, <8 x double> %src0, <8 x i1> %mask) {
; RV32-LABEL: expandload_v8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB11_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB11_10
-; RV32-NEXT: .LBB11_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB11_11
-; RV32-NEXT: .LBB11_3: # %else6
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB11_12
-; RV32-NEXT: .LBB11_4: # %else10
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB11_13
-; RV32-NEXT: .LBB11_5: # %else14
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB11_14
-; RV32-NEXT: .LBB11_6: # %else18
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB11_15
-; RV32-NEXT: .LBB11_7: # %else22
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB11_16
-; RV32-NEXT: .LBB11_8: # %else26
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB11_9: # %cond.load
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV32-NEXT: vfmv.s.f v8, fa5
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB11_2
-; RV32-NEXT: .LBB11_10: # %cond.load1
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 1
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB11_3
-; RV32-NEXT: .LBB11_11: # %cond.load5
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vslideup.vi v8, v12, 2
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB11_4
-; RV32-NEXT: .LBB11_12: # %cond.load9
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vslideup.vi v8, v12, 3
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB11_5
-; RV32-NEXT: .LBB11_13: # %cond.load13
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vslideup.vi v8, v12, 4
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB11_6
-; RV32-NEXT: .LBB11_14: # %cond.load17
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vslideup.vi v8, v12, 5
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB11_7
-; RV32-NEXT: .LBB11_15: # %cond.load21
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 7, e64, m4, tu, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vslideup.vi v8, v12, 6
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB11_8
-; RV32-NEXT: .LBB11_16: # %cond.load25
-; RV32-NEXT: fld fa5, 0(a0)
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vfmv.s.f v12, fa5
-; RV32-NEXT: vslideup.vi v8, v12, 7
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT: vle64.v v12, (a0)
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: vrgather.vv v8, v12, v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB11_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB11_10
-; RV64-NEXT: .LBB11_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB11_11
-; RV64-NEXT: .LBB11_3: # %else6
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB11_12
-; RV64-NEXT: .LBB11_4: # %else10
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB11_13
-; RV64-NEXT: .LBB11_5: # %else14
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB11_14
-; RV64-NEXT: .LBB11_6: # %else18
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB11_15
-; RV64-NEXT: .LBB11_7: # %else22
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB11_16
-; RV64-NEXT: .LBB11_8: # %else26
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB11_9: # %cond.load
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB11_2
-; RV64-NEXT: .LBB11_10: # %cond.load1
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v12, 1
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB11_3
-; RV64-NEXT: .LBB11_11: # %cond.load5
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vslideup.vi v8, v12, 2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB11_4
-; RV64-NEXT: .LBB11_12: # %cond.load9
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e64, m2, tu, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vslideup.vi v8, v12, 3
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB11_5
-; RV64-NEXT: .LBB11_13: # %cond.load13
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vslideup.vi v8, v12, 4
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB11_6
-; RV64-NEXT: .LBB11_14: # %cond.load17
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vslideup.vi v8, v12, 5
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB11_7
-; RV64-NEXT: .LBB11_15: # %cond.load21
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 7, e64, m4, tu, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vslideup.vi v8, v12, 6
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB11_8
-; RV64-NEXT: .LBB11_16: # %cond.load25
-; RV64-NEXT: fld fa5, 0(a0)
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vfmv.s.f v12, fa5
-; RV64-NEXT: vslideup.vi v8, v12, 7
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v12, (a0)
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t
; RV64-NEXT: ret
%res = call <8 x double> @llvm.masked.expandload.v8f64(ptr align 8 %base, <8 x i1> %mask, <8 x double> %src0)
ret <8 x double>%res
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
index 5bf8b07efc1da..bba21fc97ee45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll
@@ -6,13 +6,13 @@ declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) {
; CHECK-LABEL: expandload_v1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB0_2
-; CHECK-NEXT: # %bb.1: # %cond.load
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: .LBB0_2: # %else
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %src0)
ret <1 x i8>%res
@@ -22,28 +22,13 @@ declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) {
; CHECK-LABEL: expandload_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB1_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_3: # %cond.load
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB1_2
-; CHECK-NEXT: .LBB1_4: # %cond.load1
-; CHECK-NEXT: lbu a0, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %src0)
ret <2 x i8>%res
@@ -53,50 +38,13 @@ declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) {
; CHECK-LABEL: expandload_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB2_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB2_6
-; CHECK-NEXT: .LBB2_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB2_7
-; CHECK-NEXT: .LBB2_3: # %else6
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB2_8
-; CHECK-NEXT: .LBB2_4: # %else10
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB2_5: # %cond.load
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB2_2
-; CHECK-NEXT: .LBB2_6: # %cond.load1
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB2_3
-; CHECK-NEXT: .LBB2_7: # %cond.load5
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB2_4
-; CHECK-NEXT: .LBB2_8: # %cond.load9
-; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vslideup.vi v8, v9, 3
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %src0)
ret <4 x i8>%res
@@ -106,94 +54,13 @@ declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) {
; CHECK-LABEL: expandload_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB3_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB3_10
-; CHECK-NEXT: .LBB3_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB3_11
-; CHECK-NEXT: .LBB3_3: # %else6
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB3_12
-; CHECK-NEXT: .LBB3_4: # %else10
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB3_13
-; CHECK-NEXT: .LBB3_5: # %else14
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB3_14
-; CHECK-NEXT: .LBB3_6: # %else18
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB3_15
-; CHECK-NEXT: .LBB3_7: # %else22
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB3_16
-; CHECK-NEXT: .LBB3_8: # %else26
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB3_9: # %cond.load
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB3_2
-; CHECK-NEXT: .LBB3_10: # %cond.load1
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB3_3
-; CHECK-NEXT: .LBB3_11: # %cond.load5
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB3_4
-; CHECK-NEXT: .LBB3_12: # %cond.load9
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 3
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB3_5
-; CHECK-NEXT: .LBB3_13: # %cond.load13
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 4
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB3_6
-; CHECK-NEXT: .LBB3_14: # %cond.load17
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 5
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB3_7
-; CHECK-NEXT: .LBB3_15: # %cond.load21
-; CHECK-NEXT: lbu a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 6
-; CHECK-NEXT: addi a0, a0, 1
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB3_8
-; CHECK-NEXT: .LBB3_16: # %cond.load25
-; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vslideup.vi v8, v9, 7
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %src0)
ret <8 x i8>%res
@@ -203,13 +70,13 @@ declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) {
; CHECK-LABEL: expandload_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB4_2
-; CHECK-NEXT: # %bb.1: # %cond.load
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: .LBB4_2: # %else
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %src0)
ret <1 x i16>%res
@@ -219,28 +86,13 @@ declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) {
; CHECK-LABEL: expandload_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB5_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB5_4
-; CHECK-NEXT: .LBB5_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB5_3: # %cond.load
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB5_2
-; CHECK-NEXT: .LBB5_4: # %cond.load1
-; CHECK-NEXT: lh a0, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %src0)
ret <2 x i16>%res
@@ -250,50 +102,13 @@ declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) {
; CHECK-LABEL: expandload_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB6_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB6_6
-; CHECK-NEXT: .LBB6_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB6_7
-; CHECK-NEXT: .LBB6_3: # %else6
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB6_8
-; CHECK-NEXT: .LBB6_4: # %else10
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB6_5: # %cond.load
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB6_2
-; CHECK-NEXT: .LBB6_6: # %cond.load1
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB6_3
-; CHECK-NEXT: .LBB6_7: # %cond.load5
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB6_4
-; CHECK-NEXT: .LBB6_8: # %cond.load9
-; CHECK-NEXT: lh a0, 0(a0)
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vslideup.vi v8, v9, 3
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %src0)
ret <4 x i16>%res
@@ -303,94 +118,13 @@ declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) {
; CHECK-LABEL: expandload_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB7_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB7_10
-; CHECK-NEXT: .LBB7_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB7_11
-; CHECK-NEXT: .LBB7_3: # %else6
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB7_12
-; CHECK-NEXT: .LBB7_4: # %else10
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB7_13
-; CHECK-NEXT: .LBB7_5: # %else14
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB7_14
-; CHECK-NEXT: .LBB7_6: # %else18
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB7_15
-; CHECK-NEXT: .LBB7_7: # %else22
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB7_16
-; CHECK-NEXT: .LBB7_8: # %else26
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB7_9: # %cond.load
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB7_2
-; CHECK-NEXT: .LBB7_10: # %cond.load1
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB7_3
-; CHECK-NEXT: .LBB7_11: # %cond.load5
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 3, e16, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB7_4
-; CHECK-NEXT: .LBB7_12: # %cond.load9
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 3
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB7_5
-; CHECK-NEXT: .LBB7_13: # %cond.load13
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 5, e16, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 4
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB7_6
-; CHECK-NEXT: .LBB7_14: # %cond.load17
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 6, e16, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 5
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB7_7
-; CHECK-NEXT: .LBB7_15: # %cond.load21
-; CHECK-NEXT: lh a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 6
-; CHECK-NEXT: addi a0, a0, 2
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB7_8
-; CHECK-NEXT: .LBB7_16: # %cond.load25
-; CHECK-NEXT: lh a0, 0(a0)
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vslideup.vi v8, v9, 7
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %src0)
ret <8 x i16>%res
@@ -400,13 +134,13 @@ declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) {
; CHECK-LABEL: expandload_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: bnez a1, .LBB8_2
-; CHECK-NEXT: # %bb.1: # %cond.load
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: .LBB8_2: # %else
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %src0)
ret <1 x i32>%res
@@ -416,28 +150,13 @@ declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) {
; CHECK-LABEL: expandload_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB9_3
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: bnez a1, .LBB9_4
-; CHECK-NEXT: .LBB9_2: # %else2
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB9_3: # %cond.load
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, 2
-; CHECK-NEXT: beqz a1, .LBB9_2
-; CHECK-NEXT: .LBB9_4: # %cond.load1
-; CHECK-NEXT: lw a0, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %src0)
ret <2 x i32>%res
@@ -447,50 +166,13 @@ declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) {
; CHECK-LABEL: expandload_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB10_5
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB10_6
-; CHECK-NEXT: .LBB10_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB10_7
-; CHECK-NEXT: .LBB10_3: # %else6
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: bnez a1, .LBB10_8
-; CHECK-NEXT: .LBB10_4: # %else10
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB10_5: # %cond.load
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB10_2
-; CHECK-NEXT: .LBB10_6: # %cond.load1
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB10_3
-; CHECK-NEXT: .LBB10_7: # %cond.load5
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, 8
-; CHECK-NEXT: beqz a1, .LBB10_4
-; CHECK-NEXT: .LBB10_8: # %cond.load9
-; CHECK-NEXT: lw a0, 0(a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vslideup.vi v8, v9, 3
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %src0)
ret <4 x i32>%res
@@ -500,94 +182,13 @@ declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) {
; CHECK-LABEL: expandload_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vmv.x.s a1, v0
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bnez a2, .LBB11_9
-; CHECK-NEXT: # %bb.1: # %else
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: bnez a2, .LBB11_10
-; CHECK-NEXT: .LBB11_2: # %else2
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: bnez a2, .LBB11_11
-; CHECK-NEXT: .LBB11_3: # %else6
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: bnez a2, .LBB11_12
-; CHECK-NEXT: .LBB11_4: # %else10
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: bnez a2, .LBB11_13
-; CHECK-NEXT: .LBB11_5: # %else14
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: bnez a2, .LBB11_14
-; CHECK-NEXT: .LBB11_6: # %else18
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: bnez a2, .LBB11_15
-; CHECK-NEXT: .LBB11_7: # %else22
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: bnez a1, .LBB11_16
-; CHECK-NEXT: .LBB11_8: # %else26
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB11_9: # %cond.load
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 2
-; CHECK-NEXT: beqz a2, .LBB11_2
-; CHECK-NEXT: .LBB11_10: # %cond.load1
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 1
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 4
-; CHECK-NEXT: beqz a2, .LBB11_3
-; CHECK-NEXT: .LBB11_11: # %cond.load5
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: vslideup.vi v8, v10, 2
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 8
-; CHECK-NEXT: beqz a2, .LBB11_4
-; CHECK-NEXT: .LBB11_12: # %cond.load9
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma
-; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: vslideup.vi v8, v10, 3
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 16
-; CHECK-NEXT: beqz a2, .LBB11_5
-; CHECK-NEXT: .LBB11_13: # %cond.load13
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: vslideup.vi v8, v10, 4
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 32
-; CHECK-NEXT: beqz a2, .LBB11_6
-; CHECK-NEXT: .LBB11_14: # %cond.load17
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: vslideup.vi v8, v10, 5
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a2, a1, 64
-; CHECK-NEXT: beqz a2, .LBB11_7
-; CHECK-NEXT: .LBB11_15: # %cond.load21
-; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: vslideup.vi v8, v10, 6
-; CHECK-NEXT: addi a0, a0, 4
-; CHECK-NEXT: andi a1, a1, -128
-; CHECK-NEXT: beqz a1, .LBB11_8
-; CHECK-NEXT: .LBB11_16: # %cond.load25
-; CHECK-NEXT: lw a0, 0(a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: viota.m v12, v0
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %src0)
ret <8 x i32>%res
@@ -595,418 +196,67 @@ define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) {
declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) {
-; RV32-LABEL: expandload_v1i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV32-NEXT: vfirst.m a1, v0
-; RV32-NEXT: bnez a1, .LBB12_2
-; RV32-NEXT: # %bb.1: # %cond.load
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: lw a1, 4(a0)
-; RV32-NEXT: lw a0, 0(a0)
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v8, (a0), zero
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .LBB12_2: # %else
-; RV32-NEXT: ret
-;
-; RV64-LABEL: expandload_v1i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT: vfirst.m a1, v0
-; RV64-NEXT: bnez a1, .LBB12_2
-; RV64-NEXT: # %bb.1: # %cond.load
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: .LBB12_2: # %else
-; RV64-NEXT: ret
+; CHECK-LABEL: expandload_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
%res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %src0)
ret <1 x i64>%res
}
declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) {
-; RV32-LABEL: expandload_v2i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB13_3
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: bnez a1, .LBB13_4
-; RV32-NEXT: .LBB13_2: # %else2
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB13_3: # %cond.load
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a3
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 2
-; RV32-NEXT: beqz a1, .LBB13_2
-; RV32-NEXT: .LBB13_4: # %cond.load1
-; RV32-NEXT: lw a1, 0(a0)
-; RV32-NEXT: lw a0, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v9, v8, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vslideup.vi v8, v9, 1
-; RV32-NEXT: ret
-;
-; RV64-LABEL: expandload_v2i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB13_3
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: bnez a1, .LBB13_4
-; RV64-NEXT: .LBB13_2: # %else2
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB13_3: # %cond.load
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT: vmv.s.x v8, a2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 2
-; RV64-NEXT: beqz a1, .LBB13_2
-; RV64-NEXT: .LBB13_4: # %cond.load1
-; RV64-NEXT: ld a0, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vslideup.vi v8, v9, 1
-; RV64-NEXT: ret
+; CHECK-LABEL: expandload_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: viota.m v10, v0
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
%res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %src0)
ret <2 x i64>%res
}
declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) {
-; RV32-LABEL: expandload_v4i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB14_5
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB14_6
-; RV32-NEXT: .LBB14_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB14_7
-; RV32-NEXT: .LBB14_3: # %else6
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: bnez a1, .LBB14_8
-; RV32-NEXT: .LBB14_4: # %else10
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB14_5: # %cond.load
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a3
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB14_2
-; RV32-NEXT: .LBB14_6: # %cond.load1
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v8, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a3
-; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v10, 1
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB14_3
-; RV32-NEXT: .LBB14_7: # %cond.load5
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v8, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a3
-; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, 8
-; RV32-NEXT: beqz a1, .LBB14_4
-; RV32-NEXT: .LBB14_8: # %cond.load9
-; RV32-NEXT: lw a1, 0(a0)
-; RV32-NEXT: lw a0, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v8, a1
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v10, 3
-; RV32-NEXT: ret
-;
-; RV64-LABEL: expandload_v4i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB14_5
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB14_6
-; RV64-NEXT: .LBB14_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB14_7
-; RV64-NEXT: .LBB14_3: # %else6
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: bnez a1, .LBB14_8
-; RV64-NEXT: .LBB14_4: # %else10
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB14_5: # %cond.load
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT: vmv.s.x v8, a2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB14_2
-; RV64-NEXT: .LBB14_6: # %cond.load1
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vmv.s.x v10, a2
-; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v10, 1
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB14_3
-; RV64-NEXT: .LBB14_7: # %cond.load5
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vmv.s.x v10, a2
-; RV64-NEXT: vslideup.vi v8, v10, 2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, 8
-; RV64-NEXT: beqz a1, .LBB14_4
-; RV64-NEXT: .LBB14_8: # %cond.load9
-; RV64-NEXT: ld a0, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vmv.s.x v10, a0
-; RV64-NEXT: vslideup.vi v8, v10, 3
-; RV64-NEXT: ret
+; CHECK-LABEL: expandload_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: viota.m v12, v0
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
%res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %src0)
ret <4 x i64>%res
}
declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) {
-; RV32-LABEL: expandload_v8i64:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: andi a2, a1, 1
-; RV32-NEXT: bnez a2, .LBB15_9
-; RV32-NEXT: # %bb.1: # %else
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: bnez a2, .LBB15_10
-; RV32-NEXT: .LBB15_2: # %else2
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: bnez a2, .LBB15_11
-; RV32-NEXT: .LBB15_3: # %else6
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: bnez a2, .LBB15_12
-; RV32-NEXT: .LBB15_4: # %else10
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: bnez a2, .LBB15_13
-; RV32-NEXT: .LBB15_5: # %else14
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: bnez a2, .LBB15_14
-; RV32-NEXT: .LBB15_6: # %else18
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: bnez a2, .LBB15_15
-; RV32-NEXT: .LBB15_7: # %else22
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: bnez a1, .LBB15_16
-; RV32-NEXT: .LBB15_8: # %else26
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB15_9: # %cond.load
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a2
-; RV32-NEXT: vslide1down.vx v8, v8, a3
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 2
-; RV32-NEXT: beqz a2, .LBB15_2
-; RV32-NEXT: .LBB15_10: # %cond.load1
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a2
-; RV32-NEXT: vslide1down.vx v12, v12, a3
-; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 1
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 4
-; RV32-NEXT: beqz a2, .LBB15_3
-; RV32-NEXT: .LBB15_11: # %cond.load5
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a2
-; RV32-NEXT: vslide1down.vx v12, v12, a3
-; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 2
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 8
-; RV32-NEXT: beqz a2, .LBB15_4
-; RV32-NEXT: .LBB15_12: # %cond.load9
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a2
-; RV32-NEXT: vslide1down.vx v12, v12, a3
-; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 3
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 16
-; RV32-NEXT: beqz a2, .LBB15_5
-; RV32-NEXT: .LBB15_13: # %cond.load13
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a2
-; RV32-NEXT: vslide1down.vx v12, v12, a3
-; RV32-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 4
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 32
-; RV32-NEXT: beqz a2, .LBB15_6
-; RV32-NEXT: .LBB15_14: # %cond.load17
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a2
-; RV32-NEXT: vslide1down.vx v12, v12, a3
-; RV32-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 5
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a2, a1, 64
-; RV32-NEXT: beqz a2, .LBB15_7
-; RV32-NEXT: .LBB15_15: # %cond.load21
-; RV32-NEXT: lw a2, 0(a0)
-; RV32-NEXT: lw a3, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a2
-; RV32-NEXT: vslide1down.vx v12, v12, a3
-; RV32-NEXT: vsetivli zero, 7, e64, m4, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 6
-; RV32-NEXT: addi a0, a0, 8
-; RV32-NEXT: andi a1, a1, -128
-; RV32-NEXT: beqz a1, .LBB15_8
-; RV32-NEXT: .LBB15_16: # %cond.load25
-; RV32-NEXT: lw a1, 0(a0)
-; RV32-NEXT: lw a0, 4(a0)
-; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
-; RV32-NEXT: vslide1down.vx v12, v8, a1
-; RV32-NEXT: vslide1down.vx v12, v12, a0
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vslideup.vi v8, v12, 7
-; RV32-NEXT: ret
-;
-; RV64-LABEL: expandload_v8i64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; RV64-NEXT: vmv.x.s a1, v0
-; RV64-NEXT: andi a2, a1, 1
-; RV64-NEXT: bnez a2, .LBB15_9
-; RV64-NEXT: # %bb.1: # %else
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: bnez a2, .LBB15_10
-; RV64-NEXT: .LBB15_2: # %else2
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: bnez a2, .LBB15_11
-; RV64-NEXT: .LBB15_3: # %else6
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: bnez a2, .LBB15_12
-; RV64-NEXT: .LBB15_4: # %else10
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: bnez a2, .LBB15_13
-; RV64-NEXT: .LBB15_5: # %else14
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: bnez a2, .LBB15_14
-; RV64-NEXT: .LBB15_6: # %else18
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: bnez a2, .LBB15_15
-; RV64-NEXT: .LBB15_7: # %else22
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: bnez a1, .LBB15_16
-; RV64-NEXT: .LBB15_8: # %else26
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB15_9: # %cond.load
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
-; RV64-NEXT: vmv.s.x v8, a2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 2
-; RV64-NEXT: beqz a2, .LBB15_2
-; RV64-NEXT: .LBB15_10: # %cond.load1
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vmv.s.x v12, a2
-; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
-; RV64-NEXT: vslideup.vi v8, v12, 1
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 4
-; RV64-NEXT: beqz a2, .LBB15_3
-; RV64-NEXT: .LBB15_11: # %cond.load5
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vmv.s.x v12, a2
-; RV64-NEXT: vslideup.vi v8, v12, 2
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 8
-; RV64-NEXT: beqz a2, .LBB15_4
-; RV64-NEXT: .LBB15_12: # %cond.load9
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetivli zero, 4, e64, m2, tu, ma
-; RV64-NEXT: vmv.s.x v12, a2
-; RV64-NEXT: vslideup.vi v8, v12, 3
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 16
-; RV64-NEXT: beqz a2, .LBB15_5
-; RV64-NEXT: .LBB15_13: # %cond.load13
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.s.x v12, a2
-; RV64-NEXT: vslideup.vi v8, v12, 4
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 32
-; RV64-NEXT: beqz a2, .LBB15_6
-; RV64-NEXT: .LBB15_14: # %cond.load17
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vmv.s.x v12, a2
-; RV64-NEXT: vslideup.vi v8, v12, 5
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a2, a1, 64
-; RV64-NEXT: beqz a2, .LBB15_7
-; RV64-NEXT: .LBB15_15: # %cond.load21
-; RV64-NEXT: ld a2, 0(a0)
-; RV64-NEXT: vsetivli zero, 7, e64, m4, tu, ma
-; RV64-NEXT: vmv.s.x v12, a2
-; RV64-NEXT: vslideup.vi v8, v12, 6
-; RV64-NEXT: addi a0, a0, 8
-; RV64-NEXT: andi a1, a1, -128
-; RV64-NEXT: beqz a1, .LBB15_8
-; RV64-NEXT: .LBB15_16: # %cond.load25
-; RV64-NEXT: ld a0, 0(a0)
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vmv.s.x v12, a0
-; RV64-NEXT: vslideup.vi v8, v12, 7
-; RV64-NEXT: ret
+; CHECK-LABEL: expandload_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vcpop.m a1, v0
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64.v v12, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT: viota.m v16, v0
+; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
%res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %src0)
ret <8 x i64>%res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
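
For reference, a minimal standalone IR file along the lines of the sketch below (the function name is made up and it is not one of the tests in this patch) can be fed through `llc -mtriple=riscv64 -mattr=+v` to observe the new vcpop/viota/vrgather lowering shown in the updated checks above:

declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)

define <4 x i32> @expandload_example(ptr %p, <4 x i1> %m, <4 x i32> %passthru) {
  ; With this change the call below should select vcpop.m + vle32.v +
  ; viota.m + vrgather.vv instead of a branchy per-element load sequence.
  %v = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %p, <4 x i1> %m, <4 x i32> %passthru)
  ret <4 x i32> %v
}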