[llvm] [RISCV] Support llvm.masked.expandload intrinsic (PR #101954)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 5 03:20:03 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Pengcheng Wang (wangpc-pp)
<details>
<summary>Changes</summary>
We can use `viota`+`vrgather` to synthesize `vdecompress`, and lower an
expanding load to `vcpop`+`load`+`vdecompress`.
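
For reference, a minimal scalar model of what the generated `vcpop.m` + `vle` + `viota.m` + masked `vrgather.vv` sequence computes for a fixed-width expanding load (the helper name and C++ types here are illustrative only, not part of the patch):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical reference model of a fixed-width masked.expandload of i8
// elements; `memory` stands in for the bytes at the base pointer.
std::vector<int8_t> expandload_ref(const std::vector<int8_t> &memory,
                                   const std::vector<bool> &mask,
                                   std::vector<int8_t> passthru) {
  // vcpop.m: the number of active mask bits is the number of elements to load.
  size_t vl = 0;
  for (bool m : mask)
    vl += m;

  // vle8.v with VL = vcpop(mask): load that many contiguous elements.
  std::vector<int8_t> loaded(memory.begin(), memory.begin() + vl);

  // viota.m: lane i gets the count of active mask bits before position i.
  // Masked vrgather.vv: each active lane i takes loaded[iota[i]];
  // inactive lanes keep the passthru value.
  size_t iota = 0;
  for (size_t i = 0; i < mask.size(); ++i)
    if (mask[i])
      passthru[i] = loaded[iota++];
  return passthru;
}
```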
Fixes #101914
---
Patch is 153.23 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/101954.diff
6 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+30-7)
- (modified) llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp (+10)
- (modified) llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h (+2)
- (added) llvm/test/CodeGen/RISCV/rvv/expandload.ll (+1541)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll (+168-966)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll (+123-873)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9ee60b9db2837..67908c480fed3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10732,18 +10732,21 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
SDValue Chain = MemSD->getChain();
SDValue BasePtr = MemSD->getBasePtr();
- SDValue Mask, PassThru, VL;
+ SDValue Mask, PassThru, LoadVL;
+ bool IsExpandingLoad = false;
if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
Mask = VPLoad->getMask();
PassThru = DAG.getUNDEF(VT);
- VL = VPLoad->getVectorLength();
+ LoadVL = VPLoad->getVectorLength();
} else {
const auto *MLoad = cast<MaskedLoadSDNode>(Op);
Mask = MLoad->getMask();
PassThru = MLoad->getPassThru();
+ IsExpandingLoad = MLoad->isExpandingLoad();
}
- bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+ bool IsUnmasked =
+ ISD::isConstantSplatVectorAllOnes(Mask.getNode()) || IsExpandingLoad;
MVT XLenVT = Subtarget.getXLenVT();
@@ -10751,14 +10754,22 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
if (VT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(VT);
PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
- if (!IsUnmasked) {
+ if (!IsUnmasked || IsExpandingLoad) {
MVT MaskVT = getMaskTypeFor(ContainerVT);
Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
}
}
- if (!VL)
- VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+ if (!LoadVL)
+ LoadVL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
+
+ SDValue ExpandingVL;
+ if (IsExpandingLoad) {
+ ExpandingVL = LoadVL;
+ LoadVL = DAG.getNode(
+ RISCVISD::VCPOP_VL, DL, XLenVT, Mask,
+ getAllOnesMask(Mask.getSimpleValueType(), LoadVL, DL, DAG), LoadVL);
+ }
unsigned IntID =
IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
@@ -10770,7 +10781,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
Ops.push_back(BasePtr);
if (!IsUnmasked)
Ops.push_back(Mask);
- Ops.push_back(VL);
+ Ops.push_back(LoadVL);
if (!IsUnmasked)
Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
@@ -10779,6 +10790,18 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
SDValue Result =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
Chain = Result.getValue(1);
+ if (IsExpandingLoad) {
+ MVT IotaVT = ContainerVT;
+ if (ContainerVT.isFloatingPoint())
+ IotaVT = ContainerVT.changeVectorElementTypeToInteger();
+
+ SDValue Iota =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, IotaVT,
+ DAG.getConstant(Intrinsic::riscv_viota, DL, XLenVT),
+ DAG.getUNDEF(IotaVT), Mask, ExpandingVL);
+ Result = DAG.getNode(RISCVISD::VRGATHER_VV_VL, DL, ContainerVT, Result,
+ Iota, PassThru, Mask, ExpandingVL);
+ }
if (VT.isFixedLengthVector())
Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 4cd904c039a98..41b39accbf027 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1966,6 +1966,16 @@ bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
+bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) {
+ auto *VTy = dyn_cast<VectorType>(DataTy);
+ if (!VTy || VTy->isScalableTy())
+ return false;
+
+ if (!isLegalMaskedLoadStore(DataTy, Alignment))
+ return false;
+ return true;
+}
+
bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) {
auto *VTy = dyn_cast<VectorType>(DataTy);
if (!VTy || VTy->isScalableTy())
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 9c37a4f6ec2d0..192bb35613aad 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -281,6 +281,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
}
+ bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
+
bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);
bool isVScaleKnownToBeAPowerOfTwo() const {
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
new file mode 100644
index 0000000000000..9380b52daf4b7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -0,0 +1,1541 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV64
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefix=RV32
+
+; Expanding load for i8 type
+
+define <1 x i8> @test_expandload_v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v1i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v1i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr align 1 %base, <1 x i1> %mask, <1 x i8> %passthru)
+ ret <1 x i8> %res
+}
+
+define <2 x i8> @test_expandload_v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v2i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v2i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr align 1 %base, <2 x i1> %mask, <2 x i8> %passthru)
+ ret <2 x i8> %res
+}
+
+define <4 x i8> @test_expandload_v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v4i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v4i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr align 1 %base, <4 x i1> %mask, <4 x i8> %passthru)
+ ret <4 x i8> %res
+}
+
+define <8 x i8> @test_expandload_v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v8i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v8i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr align 1 %base, <8 x i1> %mask, <8 x i8> %passthru)
+ ret <8 x i8> %res
+}
+
+define <16 x i8> @test_expandload_v16i8(ptr %base, <16 x i1> %mask, <16 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v16i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vcpop.m a1, v0
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vle8.v v9, (a0)
+; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; RV64-NEXT: viota.m v10, v0
+; RV64-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v16i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT: vcpop.m a1, v0
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vle8.v v9, (a0)
+; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; RV32-NEXT: viota.m v10, v0
+; RV32-NEXT: vrgather.vv v8, v9, v10, v0.t
+; RV32-NEXT: ret
+ %res = call <16 x i8> @llvm.masked.expandload.v16i8(ptr align 1 %base, <16 x i1> %mask, <16 x i8> %passthru)
+ ret <16 x i8> %res
+}
+
+define <32 x i8> @test_expandload_v32i8(ptr %base, <32 x i1> %mask, <32 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v32i8:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 32
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; RV64-NEXT: vle8.v v10, (a0)
+; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV64-NEXT: viota.m v12, v0
+; RV64-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v32i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; RV32-NEXT: vle8.v v10, (a0)
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT: viota.m v12, v0
+; RV32-NEXT: vrgather.vv v8, v10, v12, v0.t
+; RV32-NEXT: ret
+ %res = call <32 x i8> @llvm.masked.expandload.v32i8(ptr align 1 %base, <32 x i1> %mask, <32 x i8> %passthru)
+ ret <32 x i8> %res
+}
+
+define <64 x i8> @test_expandload_v64i8(ptr %base, <64 x i1> %mask, <64 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v64i8:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; RV64-NEXT: vle8.v v12, (a0)
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v64i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; RV32-NEXT: vle8.v v12, (a0)
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: vrgather.vv v8, v12, v16, v0.t
+; RV32-NEXT: ret
+ %res = call <64 x i8> @llvm.masked.expandload.v64i8(ptr align 1 %base, <64 x i1> %mask, <64 x i8> %passthru)
+ ret <64 x i8> %res
+}
+
+define <128 x i8> @test_expandload_v128i8(ptr %base, <128 x i1> %mask, <128 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v128i8:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 128
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vcpop.m a2, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v16, (a0)
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; RV64-NEXT: viota.m v24, v0
+; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v128i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 128
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vcpop.m a2, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v16, (a0)
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; RV32-NEXT: viota.m v24, v0
+; RV32-NEXT: vrgather.vv v8, v16, v24, v0.t
+; RV32-NEXT: ret
+ %res = call <128 x i8> @llvm.masked.expandload.v128i8(ptr align 1 %base, <128 x i1> %mask, <128 x i8> %passthru)
+ ret <128 x i8> %res
+}
+
+define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8> %passthru) {
+; RV64-LABEL: test_expandload_v256i8:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 5
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: li a3, 24
+; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v7, v8
+; RV64-NEXT: li a2, 128
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v8, (a1)
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v0, 1
+; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcpop.m a4, v0
+; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v24, (a0)
+; RV64-NEXT: csrr a4, vlenb
+; RV64-NEXT: slli a4, a4, 4
+; RV64-NEXT: add a4, sp, a4
+; RV64-NEXT: addi a4, a4, 16
+; RV64-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT: vcpop.m a4, v7
+; RV64-NEXT: cpop a3, a3
+; RV64-NEXT: cpop a1, a1
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; RV64-NEXT: viota.m v16, v0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: viota.m v16, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t
+; RV64-NEXT: vmv.v.v v16, v8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-LABEL: test_expandload_v256i8:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v7, v8
+; RV32-NEXT: li a2, 128
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v0, 1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsrl.vx v10, v9, a1
+; RV32-NEXT: vmv.x.s a3, v10
+; RV32-NEXT: vsrl.vx v10, v0, a1
+; RV32-NEXT: vmv.x.s a1, v10
+; RV32-NEXT: vmv.x.s a4, v9
+; RV32-NEXT: vmv.x.s a5, v0
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcpop.m a6, v0
+; RV32-NEXT: vsetvli zero, a6, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: csrr a6, vlenb
+; RV32-NEXT: slli a6, a6, 4
+; RV32-NEXT: add a6, sp, a6
+; RV32-NEXT: addi a6, a6, 16
+; RV32-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; RV32-NEXT: cpop a1, a1
+; RV32-NEXT: cpop a5, a5
+; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: cpop a3, a3
+; RV32-NEXT: cpop a4, a4
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: add a1, a1, a3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT: vcpop.m a1, v7
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; RV32-NEXT: viota.m v16, v0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vrgather.vv v8, v24, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: viota.m v16, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, ...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/101954