[llvm-branch-commits] [llvm] [CodeGen] Refactor targets to override the new getTgtMemIntrinsic overload (NFC) (PR #175844)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Jan 13 13:48:00 PST 2026
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Nicolai Hähnle (nhaehnle)
**Changes**
This is a fairly mechanical change: instead of returning true/false, each target override now either leaves the Infos vector empty or pushes a single entry.
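The shape of the change is the same in every target. Here is a minimal before/after sketch of the pattern, using a hypothetical `MyTargetLowering` and intrinsic name (schematic rather than standalone-compilable; the real overrides in the diff below follow this shape):

```cpp
// Before: report success via bool, fill a single out-parameter.
bool MyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallBase &I,
                                          MachineFunction &MF,
                                          unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::my_target_load: // hypothetical intrinsic
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

// After: return void; push an entry for "true", leave Infos empty for "false".
void MyTargetLowering::getTgtMemIntrinsic(
    SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I,
    MachineFunction &MF, unsigned Intrinsic) const {
  IntrinsicInfo Info;
  switch (Intrinsic) {
  case Intrinsic::my_target_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.flags = MachineMemOperand::MOLoad;
    Infos.push_back(Info);
    return;
  default:
    return; // Infos stays empty, the old `return false`.
  }
}
```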
---
**Stack**:
- [4/4] #175846
- [3/4] #175845
- [2/4] #175844 ⬅
- [1/4] #175843
⚠️ *Part of a stack created by [spr](https://github.com/nhaehnle/spr). Merging this PR using the GitHub UI may have unexpected results.*
---
Patch is 74.14 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/175844.diff
22 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+36-22)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.h (+2-2)
- (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+40-21)
- (modified) llvm/lib/Target/AMDGPU/SIISelLowering.h (+1-1)
- (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+36-22)
- (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+2-2)
- (modified) llvm/lib/Target/Hexagon/HexagonISelLowering.cpp (+11-12)
- (modified) llvm/lib/Target/Hexagon/HexagonISelLowering.h (+2-2)
- (modified) llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp (+9-7)
- (modified) llvm/lib/Target/LoongArch/LoongArchISelLowering.h (+2-2)
- (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp (+99-53)
- (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.h (+2-2)
- (modified) llvm/lib/Target/PowerPC/PPCISelLowering.cpp (+16-12)
- (modified) llvm/lib/Target/PowerPC/PPCISelLowering.h (+2-2)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+78-59)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.h (+2-2)
- (modified) llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp (+6-7)
- (modified) llvm/lib/Target/SPIRV/SPIRVISelLowering.h (+2-2)
- (modified) llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp (+15-10)
- (modified) llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h (+2-2)
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+26-18)
- (modified) llvm/lib/Target/X86/X86ISelLowering.h (+6-6)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a7ac558182228..2fba4592c8525 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17362,7 +17362,7 @@ SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
/// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
template <unsigned NumVecs>
-static bool
+static void
setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
AArch64TargetLowering::IntrinsicInfo &Info, const CallBase &CI) {
Info.opc = ISD::INTRINSIC_VOID;
@@ -17382,24 +17382,29 @@ setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
Info.offset = 0;
Info.align.reset();
Info.flags = MachineMemOperand::MOStore;
- return true;
}
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
-bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallBase &I,
- MachineFunction &MF,
- unsigned Intrinsic) const {
+void AArch64TargetLowering::getTgtMemIntrinsic(
+ SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I,
+ MachineFunction &MF, unsigned Intrinsic) const {
+ IntrinsicInfo Info;
auto &DL = I.getDataLayout();
switch (Intrinsic) {
case Intrinsic::aarch64_sve_st2:
- return setInfoSVEStN<2>(*this, DL, Info, I);
+ setInfoSVEStN<2>(*this, DL, Info, I);
+ Infos.push_back(Info);
+ return;
case Intrinsic::aarch64_sve_st3:
- return setInfoSVEStN<3>(*this, DL, Info, I);
+ setInfoSVEStN<3>(*this, DL, Info, I);
+ Infos.push_back(Info);
+ return;
case Intrinsic::aarch64_sve_st4:
- return setInfoSVEStN<4>(*this, DL, Info, I);
+ setInfoSVEStN<4>(*this, DL, Info, I);
+ Infos.push_back(Info);
+ return;
case Intrinsic::aarch64_neon_ld2:
case Intrinsic::aarch64_neon_ld3:
case Intrinsic::aarch64_neon_ld4:
@@ -17414,7 +17419,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align.reset();
// volatile loads with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_neon_ld2lane:
case Intrinsic::aarch64_neon_ld3lane:
@@ -17435,7 +17441,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align.reset();
// volatile loads with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_neon_st2:
case Intrinsic::aarch64_neon_st3:
@@ -17457,7 +17464,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align.reset();
// volatile stores with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_neon_st2lane:
case Intrinsic::aarch64_neon_st3lane:
@@ -17481,7 +17489,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align.reset();
// volatile stores with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_ldaxr:
case Intrinsic::aarch64_ldxr: {
@@ -17492,7 +17501,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.offset = 0;
Info.align = DL.getABITypeAlign(ValTy);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_stlxr:
case Intrinsic::aarch64_stxr: {
@@ -17503,7 +17513,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.offset = 0;
Info.align = DL.getABITypeAlign(ValTy);
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_ldaxp:
case Intrinsic::aarch64_ldxp:
@@ -17513,7 +17524,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.offset = 0;
Info.align = Align(16);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
case Intrinsic::aarch64_stlxp:
case Intrinsic::aarch64_stxp:
Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -17522,7 +17534,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.offset = 0;
Info.align = Align(16);
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
case Intrinsic::aarch64_sve_ldnt1: {
Type *ElTy = cast<VectorType>(I.getType())->getElementType();
Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -17531,7 +17544,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.offset = 0;
Info.align = DL.getABITypeAlign(ElTy);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_sve_stnt1: {
Type *ElTy =
@@ -17542,7 +17556,8 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.offset = 0;
Info.align = DL.getABITypeAlign(ElTy);
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::aarch64_mops_memset_tag: {
Value *Dst = I.getArgOperand(0);
@@ -17555,13 +17570,12 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.flags = MachineMemOperand::MOStore;
// The size of the memory being operated on is unknown at this point
Info.size = MemoryLocation::UnknownSize;
- return true;
+ Infos.push_back(Info);
+ return;
}
default:
break;
}
-
- return false;
}
bool AArch64TargetLowering::shouldReduceLoadWidth(
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 258651261fd62..db47de77bd39b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -206,8 +206,8 @@ class AArch64TargetLowering : public TargetLowering {
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
- bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I,
- MachineFunction &MF,
+ void getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
+ const CallBase &I, MachineFunction &MF,
unsigned Intrinsic) const override;
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ed5988ee6efc3..d21d2e05469ea 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1320,10 +1320,11 @@ static void getCoopAtomicOperandsInfo(const CallBase &CI, bool IsLoad,
Info.ssid = CI.getContext().getOrInsertSyncScopeID(Scope);
}
-bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
const CallBase &CI,
MachineFunction &MF,
unsigned IntrID) const {
+ IntrinsicInfo Info;
Info.flags = MachineMemOperand::MONone;
if (CI.hasMetadata(LLVMContext::MD_invariant_load))
Info.flags |= MachineMemOperand::MOInvariant;
@@ -1337,7 +1338,7 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Intrinsic::getFnAttributes(CI.getContext(), (Intrinsic::ID)IntrID);
MemoryEffects ME = Attr.getMemoryEffects();
if (ME.doesNotAccessMemory())
- return false;
+ return;
// TODO: Should images get their own address space?
Info.fallbackAddressSpace = AMDGPUAS::BUFFER_RESOURCE;
@@ -1433,7 +1434,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8);
Info.ptrVal = CI.getArgOperand(1);
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load:
@@ -1443,11 +1445,13 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
std::numeric_limits<unsigned>::max());
Info.flags &= ~MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
}
}
- return true;
+ Infos.push_back(Info);
+ return;
}
switch (IntrID) {
@@ -1463,7 +1467,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
if (!Vol->isZero())
Info.flags |= MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_ds_add_gs_reg_rtn:
case Intrinsic::amdgcn_ds_sub_gs_reg_rtn: {
@@ -1472,7 +1477,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.ptrVal = nullptr;
Info.fallbackAddressSpace = AMDGPUAS::STREAMOUT_REGISTER;
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume: {
@@ -1486,7 +1492,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
if (!Vol->isZero())
Info.flags |= MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_ds_atomic_async_barrier_arrive_b64:
case Intrinsic::amdgcn_ds_atomic_barrier_arrive_rtn_b64: {
@@ -1499,7 +1506,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.size = 8;
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_image_bvh_dual_intersect_ray:
case Intrinsic::amdgcn_image_bvh_intersect_ray:
@@ -1515,7 +1523,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align.reset();
Info.flags |=
MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_global_atomic_fmin_num:
case Intrinsic::amdgcn_global_atomic_fmax_num:
@@ -1529,7 +1538,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_flat_load_monitor_b32:
case Intrinsic::amdgcn_flat_load_monitor_b64:
@@ -1557,7 +1567,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.ptrVal = CI.getOperand(0);
Info.align.reset();
Info.flags |= MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
@@ -1567,7 +1578,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.ptrVal = CI.getOperand(0);
Info.align.reset();
getCoopAtomicOperandsInfo(CI, /*IsLoad=*/true, Info);
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
@@ -1577,7 +1589,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.ptrVal = CI.getArgOperand(0);
Info.align.reset();
getCoopAtomicOperandsInfo(CI, /*IsLoad=*/false, Info);
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_ds_gws_init:
case Intrinsic::amdgcn_ds_gws_barrier:
@@ -1602,7 +1615,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.flags |= MachineMemOperand::MOLoad;
else
Info.flags |= MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_global_load_async_to_lds_b8:
case Intrinsic::amdgcn_global_load_async_to_lds_b32:
@@ -1616,7 +1630,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
Info.ptrVal = CI.getArgOperand(1);
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_global_store_async_from_lds_b8:
case Intrinsic::amdgcn_global_store_async_from_lds_b32:
@@ -1626,7 +1641,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
Info.ptrVal = CI.getArgOperand(0);
Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_load_to_lds:
case Intrinsic::amdgcn_global_load_lds: {
@@ -1638,7 +1654,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
auto *Aux = cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1));
if (Aux->getZExtValue() & AMDGPU::CPol::VOLATILE)
Info.flags |= MachineMemOperand::MOVolatile;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_ds_bvh_stack_rtn:
case Intrinsic::amdgcn_ds_bvh_stack_push4_pop1_rtn:
@@ -1658,7 +1675,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = Align(4);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::amdgcn_s_prefetch_data:
case Intrinsic::amdgcn_flat_prefetch:
@@ -1667,10 +1685,11 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = EVT::getIntegerVT(CI.getContext(), 8);
Info.ptrVal = CI.getArgOperand(0);
Info.flags |= MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
default:
- return false;
+ return;
}
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index e82f4528fcd09..c4020bdc7655c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -336,7 +336,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
MVT getPointerTy(const DataLayout &DL, unsigned AS) const override;
MVT getPointerMemTy(const DataLayout &DL, unsigned AS) const override;
- bool getTgtMemIntrinsic(IntrinsicInfo &, const CallBase &,
+ void getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &, const CallBase &,
MachineFunction &MF,
unsigned IntrinsicID) const override;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 4fd845fbc07ac..0eaea492272e6 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -20680,10 +20680,10 @@ bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
-bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallBase &I,
- MachineFunction &MF,
- unsigned Intrinsic) const {
+void ARMTargetLowering::getTgtMemIntrinsic(
+ SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I,
+ MachineFunction &MF, unsigned Intrinsic) const {
+ IntrinsicInfo Info;
switch (Intrinsic) {
case Intrinsic::arm_neon_vld1:
case Intrinsic::arm_neon_vld2:
@@ -20706,7 +20706,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
// volatile loads with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_neon_vld1x2:
case Intrinsic::arm_neon_vld1x3:
@@ -20721,7 +20722,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = I.getParamAlign(I.arg_size() - 1).valueOrOne();
// volatile loads with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_neon_vst1:
case Intrinsic::arm_neon_vst2:
@@ -20747,7 +20749,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
// volatile stores with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_neon_vst1x2:
case Intrinsic::arm_neon_vst1x3:
@@ -20768,7 +20771,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = I.getParamAlign(0).valueOrOne();
// volatile stores with NEON intrinsics not supported
Info.flags = MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_mve_vld2q:
case Intrinsic::arm_mve_vld4q: {
@@ -20782,7 +20786,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = Align(VecTy->getScalarSizeInBits() / 8);
// volatile loads with MVE intrinsics not supported
Info.flags = MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_mve_vst2q:
case Intrinsic::arm_mve_vst4q: {
@@ -20796,7 +20801,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = Align(VecTy->getScalarSizeInBits() / 8);
// volatile stores with MVE intrinsics not supported
Info.flags = MachineMemOperand::MOStore;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_mve_vldr_gather_base:
case Intrinsic::arm_mve_vldr_gather_base_predicated: {
@@ -20805,7 +20811,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = MVT::getVT(I.getType());
Info.align = Align(1);
Info.flags |= MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_mve_vldr_gather_base_wb:
case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
@@ -20814,7 +20821,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = MVT::getVT(I.getType()->getContainedType(0));
Info.align = Align(1);
Info.flags |= MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_mve_vldr_gather_offset:
case Intrinsic::arm_mve_vldr_gather_offset_predicated: {
@@ -20826,7 +20834,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
DataVT.getVectorNumElements());
Info.align = Align(1);
Info.flags |= MachineMemOperand::MOLoad;
- return true;
+ Infos.push_back(Info);
+ return;
}
case Intrinsic::arm_mve_vstr_scatter_base:
case Intrinsic::arm_mve_vstr_scatter_base_predicated:...
[truncated]
``````````
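For context, a hypothetical caller-side sketch of how the new overload can be consumed (not part of this patch; call sites are presumably updated elsewhere in the stack). An empty vector corresponds to the old `false` return:

```cpp
SmallVector<TargetLowering::IntrinsicInfo, 1> Infos;
TLI.getTgtMemIntrinsic(Infos, CI, MF, IntrID);
if (Infos.empty()) {
  // Not a target memory intrinsic (old `return false` path).
} else {
  for (const TargetLowering::IntrinsicInfo &Info : Infos) {
    // Old `return true` path: build MachineMemOperands from Info.opc,
    // Info.memVT, Info.ptrVal, Info.flags, etc.
  }
}
```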
https://github.com/llvm/llvm-project/pull/175844
More information about the llvm-branch-commits mailing list