[llvm] [IA][RISCV] Add support for vp.load/vp.store with shufflevector (PR #135445)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 11 14:59:32 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Min-Yih Hsu (mshockwave)
<details>
<summary>Changes</summary>
Teach InterleavedAccessPass to recognize the vp.load + shufflevector and shufflevector + vp.store patterns, though this patch only adds RISC-V support for actually lowering them. The vp.load/vp.store in these patterns must have a constant mask and a constant EVL.
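To make the pattern concrete, here is a minimal IR sketch of what the pass now recognizes on each side (value names like `%p`, `%q`, and `%wide` are illustrative, not taken from the patch or its tests):

```llvm
; Load side, factor 2: a vp.load with an all-true constant mask and a
; constant EVL covering the whole vector, deinterleaved by two shuffles.
; On RISC-V this can now be lowered to a segment load (vlseg2).
%wide = call <8 x i32> @llvm.vp.load.v8i32.p0(
            ptr %p,
            <8 x i1> <i1 true, i1 true, i1 true, i1 true,
                      i1 true, i1 true, i1 true, i1 true>,
            i32 8)
%evens = shufflevector <8 x i32> %wide, <8 x i32> poison,
             <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%odds  = shufflevector <8 x i32> %wide, <8 x i32> poison,
             <4 x i32> <i32 1, i32 3, i32 5, i32 7>

; Store side, factor 2: an interleaving shuffle feeding a vp.store,
; lowerable to a segment store (vsseg2).
%ileave = shufflevector <4 x i32> %a, <4 x i32> %b,
              <8 x i32> <i32 0, i32 4, i32 1, i32 5,
                         i32 2, i32 6, i32 3, i32 7>
call void @llvm.vp.store.v8i32.p0(
         <8 x i32> %ileave, ptr %q,
         <8 x i1> <i1 true, i1 true, i1 true, i1 true,
                   i1 true, i1 true, i1 true, i1 true>,
         i32 8)
```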
-------
I really don't want to add yet another TLI hook on top of the _six_ we already have, which is why I reuse the existing `TLI::lowerInterleavedLoad/Store`.
Also, note that I'm planning to consolidate and generalize InterleavedAccessPass to support any combination of load/vp.load/masked.load with shufflevector/vector.deinterleave (and their store counterparts).
---
Patch is 60.54 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/135445.diff
11 Files Affected:
- (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+5-4)
- (modified) llvm/lib/CodeGen/InterleavedAccessPass.cpp (+150-27)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+10-2)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.h (+2-2)
- (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+10-2)
- (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+2-2)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+57-22)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.h (+2-2)
- (modified) llvm/lib/Target/X86/X86ISelLowering.h (+2-2)
- (modified) llvm/lib/Target/X86/X86InterleavedAccess.cpp (+10-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll (+411-32)
``````````diff
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 4f2f202f94841..5407bf8b2ba13 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3157,11 +3157,11 @@ class TargetLoweringBase {
/// Lower an interleaved load to target specific intrinsics. Return
/// true on success.
///
- /// \p LI is the vector load instruction.
+ /// \p LoadOp is a vector load or vp.load instruction.
/// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
/// \p Indices is the corresponding indices for each shufflevector.
/// \p Factor is the interleave factor.
- virtual bool lowerInterleavedLoad(LoadInst *LI,
+ virtual bool lowerInterleavedLoad(Instruction *LoadOp,
ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices,
unsigned Factor) const {
@@ -3171,10 +3171,11 @@ class TargetLoweringBase {
/// Lower an interleaved store to target specific intrinsics. Return
/// true on success.
///
- /// \p SI is the vector store instruction.
+ /// \p StoreOp is a vector store or vp.store instruction.
/// \p SVI is the shufflevector to RE-interleave the stored vector.
/// \p Factor is the interleave factor.
- virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+ virtual bool lowerInterleavedStore(Instruction *StoreOp,
+ ShuffleVectorInst *SVI,
unsigned Factor) const {
return false;
}
diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 9e47510e9cd1a..83bde96cc725a 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -45,6 +45,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
@@ -100,11 +101,11 @@ class InterleavedAccessImpl {
unsigned MaxFactor = 0u;
/// Transform an interleaved load into target specific intrinsics.
- bool lowerInterleavedLoad(LoadInst *LI,
+ bool lowerInterleavedLoad(Instruction *LoadOp,
SmallSetVector<Instruction *, 32> &DeadInsts);
/// Transform an interleaved store into target specific intrinsics.
- bool lowerInterleavedStore(StoreInst *SI,
+ bool lowerInterleavedStore(Instruction *StoreOp,
SmallSetVector<Instruction *, 32> &DeadInsts);
/// Transform a load and a deinterleave intrinsic into target specific
@@ -131,7 +132,7 @@ class InterleavedAccessImpl {
/// made.
bool replaceBinOpShuffles(ArrayRef<ShuffleVectorInst *> BinOpShuffles,
SmallVectorImpl<ShuffleVectorInst *> &Shuffles,
- LoadInst *LI);
+ Instruction *LI);
};
class InterleavedAccess : public FunctionPass {
@@ -250,10 +251,23 @@ static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
}
bool InterleavedAccessImpl::lowerInterleavedLoad(
- LoadInst *LI, SmallSetVector<Instruction *, 32> &DeadInsts) {
- if (!LI->isSimple() || isa<ScalableVectorType>(LI->getType()))
+ Instruction *LoadOp, SmallSetVector<Instruction *, 32> &DeadInsts) {
+ if (isa<ScalableVectorType>(LoadOp->getType()))
return false;
+ if (auto *LI = dyn_cast<LoadInst>(LoadOp)) {
+ if (!LI->isSimple())
+ return false;
+ } else if (auto *VPLoad = dyn_cast<VPIntrinsic>(LoadOp)) {
+ assert(VPLoad->getIntrinsicID() == Intrinsic::vp_load);
+ // Require a constant mask and evl.
+ if (!isa<ConstantVector>(VPLoad->getArgOperand(1)) ||
+ !isa<ConstantInt>(VPLoad->getArgOperand(2)))
+ return false;
+ } else {
+ llvm_unreachable("unsupported load operation");
+ }
+
// Check if all users of this load are shufflevectors. If we encounter any
// users that are extractelement instructions or binary operators, we save
// them to later check if they can be modified to extract from one of the
@@ -265,7 +279,7 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
// binop are the same load.
SmallSetVector<ShuffleVectorInst *, 4> BinOpShuffles;
- for (auto *User : LI->users()) {
+ for (auto *User : LoadOp->users()) {
auto *Extract = dyn_cast<ExtractElementInst>(User);
if (Extract && isa<ConstantInt>(Extract->getIndexOperand())) {
Extracts.push_back(Extract);
@@ -294,13 +308,31 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
unsigned Factor, Index;
unsigned NumLoadElements =
- cast<FixedVectorType>(LI->getType())->getNumElements();
+ cast<FixedVectorType>(LoadOp->getType())->getNumElements();
auto *FirstSVI = Shuffles.size() > 0 ? Shuffles[0] : BinOpShuffles[0];
// Check if the first shufflevector is DE-interleave shuffle.
if (!isDeInterleaveMask(FirstSVI->getShuffleMask(), Factor, Index, MaxFactor,
NumLoadElements))
return false;
+ // If this is a vp.load, record its mask (NOT shuffle mask).
+ BitVector MaskedIndices(NumLoadElements);
+ if (auto *VPLoad = dyn_cast<VPIntrinsic>(LoadOp)) {
+ auto *Mask = cast<ConstantVector>(VPLoad->getArgOperand(1));
+ assert(cast<FixedVectorType>(Mask->getType())->getNumElements() ==
+ NumLoadElements);
+ if (auto *Splat = Mask->getSplatValue()) {
+ // All-zeros mask, bail out early.
+ if (Splat->isZeroValue())
+ return false;
+ } else {
+ for (unsigned i = 0U; i < NumLoadElements; ++i) {
+ if (Mask->getAggregateElement(i)->isZeroValue())
+ MaskedIndices.set(i);
+ }
+ }
+ }
+
// Holds the corresponding index for each DE-interleave shuffle.
SmallVector<unsigned, 4> Indices;
@@ -327,9 +359,9 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
assert(Shuffle->getShuffleMask().size() <= NumLoadElements);
- if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(0) == LI)
+ if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(0) == LoadOp)
Indices.push_back(Index);
- if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(1) == LI)
+ if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(1) == LoadOp)
Indices.push_back(Index);
}
@@ -339,25 +371,61 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
return false;
bool BinOpShuffleChanged =
- replaceBinOpShuffles(BinOpShuffles.getArrayRef(), Shuffles, LI);
+ replaceBinOpShuffles(BinOpShuffles.getArrayRef(), Shuffles, LoadOp);
+
+ // Check if we extract only the unmasked elements.
+ if (MaskedIndices.any()) {
+ if (any_of(Shuffles, [&](const auto *Shuffle) {
+ ArrayRef<int> ShuffleMask = Shuffle->getShuffleMask();
+ for (int Idx : ShuffleMask) {
+ if (Idx < 0)
+ continue;
+ if (MaskedIndices.test(unsigned(Idx)))
+ return true;
+ }
+ return false;
+ })) {
+ LLVM_DEBUG(dbgs() << "IA: trying to extract a masked element through "
+ << "shufflevector\n");
+ return false;
+ }
+ }
+ // Check if we extract only the elements within evl.
+ if (auto *VPLoad = dyn_cast<VPIntrinsic>(LoadOp)) {
+ uint64_t EVL = cast<ConstantInt>(VPLoad->getArgOperand(2))->getZExtValue();
+ if (any_of(Shuffles, [&](const auto *Shuffle) {
+ ArrayRef<int> ShuffleMask = Shuffle->getShuffleMask();
+ for (int Idx : ShuffleMask) {
+ if (Idx < 0)
+ continue;
+ if (unsigned(Idx) >= EVL)
+ return true;
+ }
+ return false;
+ })) {
+ LLVM_DEBUG(
+ dbgs() << "IA: trying to extract an element out of EVL range\n");
+ return false;
+ }
+ }
- LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *LoadOp << "\n");
// Try to create target specific intrinsics to replace the load and shuffles.
- if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor)) {
+ if (!TLI->lowerInterleavedLoad(LoadOp, Shuffles, Indices, Factor)) {
// If Extracts is not empty, tryReplaceExtracts made changes earlier.
return !Extracts.empty() || BinOpShuffleChanged;
}
DeadInsts.insert_range(Shuffles);
- DeadInsts.insert(LI);
+ DeadInsts.insert(LoadOp);
return true;
}
bool InterleavedAccessImpl::replaceBinOpShuffles(
ArrayRef<ShuffleVectorInst *> BinOpShuffles,
- SmallVectorImpl<ShuffleVectorInst *> &Shuffles, LoadInst *LI) {
+ SmallVectorImpl<ShuffleVectorInst *> &Shuffles, Instruction *LoadOp) {
for (auto *SVI : BinOpShuffles) {
BinaryOperator *BI = cast<BinaryOperator>(SVI->getOperand(0));
Type *BIOp0Ty = BI->getOperand(0)->getType();
@@ -380,9 +448,9 @@ bool InterleavedAccessImpl::replaceBinOpShuffles(
<< "\n With : " << *NewSVI1 << "\n And : "
<< *NewSVI2 << "\n And : " << *NewBI << "\n");
RecursivelyDeleteTriviallyDeadInstructions(SVI);
- if (NewSVI1->getOperand(0) == LI)
+ if (NewSVI1->getOperand(0) == LoadOp)
Shuffles.push_back(NewSVI1);
- if (NewSVI2->getOperand(0) == LI)
+ if (NewSVI2->getOperand(0) == LoadOp)
Shuffles.push_back(NewSVI2);
}
@@ -454,27 +522,79 @@ bool InterleavedAccessImpl::tryReplaceExtracts(
}
bool InterleavedAccessImpl::lowerInterleavedStore(
- StoreInst *SI, SmallSetVector<Instruction *, 32> &DeadInsts) {
- if (!SI->isSimple())
- return false;
+ Instruction *StoreOp, SmallSetVector<Instruction *, 32> &DeadInsts) {
+ Value *StoredValue;
+ if (auto *SI = dyn_cast<StoreInst>(StoreOp)) {
+ if (!SI->isSimple())
+ return false;
+ StoredValue = SI->getValueOperand();
+ } else if (auto *VPStore = dyn_cast<VPIntrinsic>(StoreOp)) {
+ assert(VPStore->getIntrinsicID() == Intrinsic::vp_store);
+ // Require a constant mask and evl.
+ if (!isa<ConstantVector>(VPStore->getArgOperand(2)) ||
+ !isa<ConstantInt>(VPStore->getArgOperand(3)))
+ return false;
+ StoredValue = VPStore->getArgOperand(0);
+ } else {
+ llvm_unreachable("unsupported store operation");
+ }
- auto *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
+ auto *SVI = dyn_cast<ShuffleVectorInst>(StoredValue);
if (!SVI || !SVI->hasOneUse() || isa<ScalableVectorType>(SVI->getType()))
return false;
+ unsigned NumStoredElements =
+ cast<FixedVectorType>(SVI->getType())->getNumElements();
+ // If this is a vp.store, record its mask (NOT shuffle mask).
+ BitVector MaskedIndices(NumStoredElements);
+ if (auto *VPStore = dyn_cast<VPIntrinsic>(StoreOp)) {
+ auto *Mask = cast<ConstantVector>(VPStore->getArgOperand(2));
+ assert(cast<FixedVectorType>(Mask->getType())->getNumElements() ==
+ NumStoredElements);
+ if (auto *Splat = Mask->getSplatValue()) {
+ // All-zeros mask, bail out early.
+ if (Splat->isZeroValue())
+ return false;
+ } else {
+ for (unsigned i = 0U; i < NumStoredElements; ++i) {
+ if (Mask->getAggregateElement(i)->isZeroValue())
+ MaskedIndices.set(i);
+ }
+ }
+ }
+
// Check if the shufflevector is RE-interleave shuffle.
unsigned Factor;
if (!isReInterleaveMask(SVI, Factor, MaxFactor))
return false;
- LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");
+ // Check if we store only the unmasked elements.
+ if (MaskedIndices.any()) {
+ if (any_of(SVI->getShuffleMask(), [&](int Idx) {
+ return Idx >= 0 && MaskedIndices.test(unsigned(Idx));
+ })) {
+ LLVM_DEBUG(dbgs() << "IA: trying to store a masked element\n");
+ return false;
+ }
+ }
+ // Check if we store only the elements within evl.
+ if (auto *VPStore = dyn_cast<VPIntrinsic>(StoreOp)) {
+ uint64_t EVL = cast<ConstantInt>(VPStore->getArgOperand(3))->getZExtValue();
+ if (any_of(SVI->getShuffleMask(),
+ [&](int Idx) { return Idx >= 0 && unsigned(Idx) >= EVL; })) {
+ LLVM_DEBUG(dbgs() << "IA: trying to store an element out of EVL range\n");
+ return false;
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *StoreOp << "\n");
// Try to create target specific intrinsics to replace the store and shuffle.
- if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
+ if (!TLI->lowerInterleavedStore(StoreOp, SVI, Factor))
return false;
// Already have a new target specific interleaved store. Erase the old store.
- DeadInsts.insert(SI);
+ DeadInsts.insert(StoreOp);
DeadInsts.insert(SVI);
return true;
}
@@ -766,12 +886,15 @@ bool InterleavedAccessImpl::runOnFunction(Function &F) {
SmallSetVector<Instruction *, 32> DeadInsts;
bool Changed = false;
+ using namespace PatternMatch;
for (auto &I : instructions(F)) {
- if (auto *LI = dyn_cast<LoadInst>(&I))
- Changed |= lowerInterleavedLoad(LI, DeadInsts);
+ if (match(&I, m_CombineOr(m_Load(m_Value()),
+ m_Intrinsic<Intrinsic::vp_load>())))
+ Changed |= lowerInterleavedLoad(&I, DeadInsts);
- if (auto *SI = dyn_cast<StoreInst>(&I))
- Changed |= lowerInterleavedStore(SI, DeadInsts);
+ if (match(&I, m_CombineOr(m_Store(m_Value(), m_Value()),
+ m_Intrinsic<Intrinsic::vp_store>())))
+ Changed |= lowerInterleavedStore(&I, DeadInsts);
if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
// At present, we only have intrinsics to represent (de)interleaving
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e366d7cb54490..d74cc3161684d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17176,7 +17176,7 @@ static Function *getStructuredStoreFunction(Module *M, unsigned Factor,
/// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
/// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
bool AArch64TargetLowering::lowerInterleavedLoad(
- LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
+ Instruction *LoadOp, ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices, unsigned Factor) const {
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
"Invalid interleave factor");
@@ -17184,6 +17184,10 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
assert(Shuffles.size() == Indices.size() &&
"Unmatched number of shufflevectors and indices");
+ auto *LI = dyn_cast<LoadInst>(LoadOp);
+ if (!LI)
+ return false;
+
const DataLayout &DL = LI->getDataLayout();
VectorType *VTy = Shuffles[0]->getType();
@@ -17359,13 +17363,17 @@ bool hasNearbyPairedStore(Iter It, Iter End, Value *Ptr, const DataLayout &DL) {
/// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
/// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
/// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
-bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
+bool AArch64TargetLowering::lowerInterleavedStore(Instruction *StoreOp,
ShuffleVectorInst *SVI,
unsigned Factor) const {
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
"Invalid interleave factor");
+ auto *SI = dyn_cast<StoreInst>(StoreOp);
+ if (!SI)
+ return false;
+
auto *VecTy = cast<FixedVectorType>(SVI->getType());
assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 0d51ef2be8631..34446abb1474c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -716,11 +716,11 @@ class AArch64TargetLowering : public TargetLowering {
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
- bool lowerInterleavedLoad(LoadInst *LI,
+ bool lowerInterleavedLoad(Instruction *LoadOp,
ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices,
unsigned Factor) const override;
- bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+ bool lowerInterleavedStore(Instruction *StoreOp, ShuffleVectorInst *SVI,
unsigned Factor) const override;
bool lowerDeinterleaveIntrinsicToLoad(
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2290ac2728c6d..64d12a0eb1d9b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21608,7 +21608,7 @@ unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
/// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
/// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
- LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
+ Instruction *LoadOp, ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices, unsigned Factor) const {
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
"Invalid interleave factor");
@@ -21616,6 +21616,10 @@ bool ARMTargetLowering::lowerInterleavedLoad(
assert(Shuffles.size() == Indices.size() &&
"Unmatched number of shufflevectors and indices");
+ auto *LI = dyn_cast<LoadInst>(LoadOp);
+ if (!LI)
+ return false;
+
auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
Type *EltTy = VecTy->getElementType();
@@ -21750,12 +21754,16 @@ bool ARMTargetLowering::lowerInterleavedLoad(
/// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
/// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
/// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
-bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
+bool ARMTargetLowering::lowerInterleavedStore(Instruction *StoreOp,
ShuffleVectorInst *SVI,
unsigned Factor) const {
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
"Invalid interleave factor");
+ auto *SI = dyn_cast<StoreInst>(StoreOp);
+ if (!SI)
+ return false;
+
auto *VecTy = cast<FixedVectorType>(SVI->getType());
assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 9fad056edd3f1..635a6cd226936 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -673,11 +673,11 @@ class VectorType;
unsigned getMaxSupportedInterleaveFactor() const override;
- bool lowerInterleavedLoad(LoadInst *LI,
+ bool lowerInterleavedLoad(Instruction *LoadOp,
ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices,
unsigned Factor) const override;
- bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+ bool lowerInterleavedStore(Instruction *StoreOp, ShuffleVectorInst *SVI,
unsigned Factor) const override;
bool shouldInsertFencesForAtomic(const Instruction *I) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f7d192756fd56..9558783963500 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -23376,19 +23376,36 @@ static const Intrinsic::ID FixedVlsegIntrIds[] = {
/// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
/// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
bool RISCVTargetLowering::lowerInterleavedLoad(
- LoadInst *LI, ArrayRef<Sh...
[truncated]
``````````
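For completeness, a sketch of a case the new guards reject: here the deinterleave shuffle reads a lane at or beyond the EVL, so the pass bails out rather than lowering (again, names are illustrative, not from the patch):

```llvm
; Rejected: EVL is 6, but the odd-lane shuffle extracts lane 7, which is
; outside the EVL range, so the pass bails out with the debug message
; "IA: trying to extract an element out of EVL range".
%wide = call <8 x i32> @llvm.vp.load.v8i32.p0(
            ptr %p,
            <8 x i1> <i1 true, i1 true, i1 true, i1 true,
                      i1 true, i1 true, i1 true, i1 true>,
            i32 6)
%odds = shufflevector <8 x i32> %wide, <8 x i32> poison,
            <4 x i32> <i32 1, i32 3, i32 5, i32 7>
```

The analogous check on the mask side rejects any shuffle that extracts (or stores) a lane whose mask bit is a constant zero.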
</details>
https://github.com/llvm/llvm-project/pull/135445