[llvm] Ensure that soft float targets don't use float/vector code for memops. (PR #107022)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 2 14:41:50 PDT 2024
github-actions[bot] wrote:
:warning: The C/C++ code formatter clang-format found issues in your code. :warning:
You can test this locally with the following command:
```bash
git-clang-format --diff 2579b411a13799534c8b8a22246134b88ba7785d 7dc7e9a07a015abcd735e261edfc61f1f04097b8 --extensions h,cpp -- llvm/include/llvm/CodeGen/TargetLowering.h llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp llvm/lib/Target/AArch64/AArch64ISelLowering.cpp llvm/lib/Target/AArch64/AArch64ISelLowering.h llvm/lib/Target/AMDGPU/SIISelLowering.cpp llvm/lib/Target/AMDGPU/SIISelLowering.h llvm/lib/Target/ARM/ARMISelLowering.cpp llvm/lib/Target/ARM/ARMISelLowering.h llvm/lib/Target/BPF/BPFISelLowering.h llvm/lib/Target/Hexagon/HexagonISelLowering.cpp llvm/lib/Target/Hexagon/HexagonISelLowering.h llvm/lib/Target/Mips/MipsISelLowering.cpp llvm/lib/Target/Mips/MipsISelLowering.h llvm/lib/Target/PowerPC/PPCISelLowering.cpp llvm/lib/Target/PowerPC/PPCISelLowering.h llvm/lib/Target/RISCV/RISCVISelLowering.cpp llvm/lib/Target/RISCV/RISCVISelLowering.h llvm/lib/Target/SystemZ/SystemZISelLowering.cpp llvm/lib/Target/SystemZ/SystemZISelLowering.h llvm/lib/Target/X86/X86ISelLowering.h llvm/lib/Target/X86/X86ISelLoweringCall.cpp
```
View the diff from clang-format here.
```diff
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index fd44223f3d..6b56da2b9e 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1963,18 +1963,16 @@ public:
/// The PreferIntScalar parameter is a hint from the caller; if true *and* the
/// implementation returns a float or vector type, the caller will discard the
/// result and proceed as if EVT::Other had been returned.
- virtual EVT
- getOptimalMemOpType(const MemOp &Op,
- const AttributeList & /*FuncAttributes*/,
- bool /*PreferIntScalar*/) const {
+ virtual EVT getOptimalMemOpType(const MemOp &Op,
+ const AttributeList & /*FuncAttributes*/,
+ bool /*PreferIntScalar*/) const {
return MVT::Other;
}
/// LLT returning variant.
- virtual LLT
- getOptimalMemOpLLT(const MemOp &Op,
- const AttributeList & /*FuncAttributes*/,
- bool /*PreferIntScalar*/) const {
+ virtual LLT getOptimalMemOpLLT(const MemOp &Op,
+ const AttributeList & /*FuncAttributes*/,
+ bool /*PreferIntScalar*/) const {
return LLT();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 0dac608595..4a5e52bc3a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -203,8 +203,8 @@ bool TargetLowering::findOptimalMemOpLowering(
Op.getSrcAlign() < Op.getDstAlign())
return false;
- bool WantIntScalar = useSoftFloat() ||
- FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
+ bool WantIntScalar =
+ useSoftFloat() || FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
EVT VT = getOptimalMemOpType(Op, FuncAttributes, WantIntScalar);
// The target may well report supporting float/vector operations to do the
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 0192012b71..77d429be81 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -720,12 +720,10 @@ public:
bool shouldConsiderGEPOffsetSplit() const override;
- EVT getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override;
- LLT getOptimalMemOpLLT(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override;
/// Return true if the addressing mode represented by AM is legal for this
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5d114cab93..a31b873b9c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1861,9 +1861,9 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(
Alignment, Flags, IsFast);
}
-EVT SITargetLowering::getOptimalMemOpType(
- const MemOp &Op, const AttributeList &FuncAttributes,
- bool PreferIntScalar) const {
+EVT SITargetLowering::getOptimalMemOpType(const MemOp &Op,
+ const AttributeList &FuncAttributes,
+ bool PreferIntScalar) const {
// FIXME: Should account for address space here.
// The default fallback uses the private pointer size as a guess for a type to
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index c80bd56a7c..fd58bd1f93 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -344,8 +344,7 @@ public:
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
unsigned *IsFast = nullptr) const override;
- EVT getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override;
bool isMemOpUniform(const SDNode *N) const;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index db16339875..9288c9dc3b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -19189,10 +19189,9 @@ bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
return false;
}
-
-EVT ARMTargetLowering::getOptimalMemOpType(
- const MemOp &Op, const AttributeList &FuncAttributes,
- bool PreferIntScalar) const {
+EVT ARMTargetLowering::getOptimalMemOpType(const MemOp &Op,
+ const AttributeList &FuncAttributes,
+ bool PreferIntScalar) const {
// See if we can use NEON instructions for this...
if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON()) {
unsigned Fast;
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.h b/llvm/lib/Target/BPF/BPFISelLowering.h
index 85b969a3ed..b4dec3593f 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.h
+++ b/llvm/lib/Target/BPF/BPFISelLowering.h
@@ -113,8 +113,7 @@ private:
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
- EVT getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override {
return Op.size() >= 8 ? MVT::i64 : MVT::i32;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index a568d67c5f..8a4840bf02 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -325,8 +325,7 @@ public:
/// the immediate into a register.
bool isLegalICmpImmediate(int64_t Imm) const override;
- EVT getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override;
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 17b26178f7..353fc07717 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -17399,9 +17399,9 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
-EVT PPCTargetLowering::getOptimalMemOpType(
- const MemOp &Op, const AttributeList &FuncAttributes,
- bool PreferIntScalar) const {
+EVT PPCTargetLowering::getOptimalMemOpType(const MemOp &Op,
+ const AttributeList &FuncAttributes,
+ bool PreferIntScalar) const {
if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None &&
!PreferIntScalar) {
// We should use Altivec/VSX loads and stores when available. For unaligned
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fdbf202946..98d81beb16 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21045,10 +21045,9 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
return Subtarget.enableUnalignedVectorMem();
}
-
-EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
- bool PreferIntScalar) const {
+EVT RISCVTargetLowering::getOptimalMemOpType(
+ const MemOp &Op, const AttributeList &FuncAttributes,
+ bool PreferIntScalar) const {
if (!Subtarget.hasVInstructions())
return MVT::Other;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 5a99d76419..581a0964e8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -782,8 +782,7 @@ public:
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
unsigned *Fast = nullptr) const override;
- EVT getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override;
bool splitValueIntoRegisterParts(
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index a5b0dc2d8c..bcd7238b11 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1132,9 +1132,9 @@ bool SystemZTargetLowering::findOptimalMemOpLowering(
SrcAS, FuncAttributes);
}
-EVT SystemZTargetLowering::getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
- bool PreferIntScalar) const {
+EVT SystemZTargetLowering::getOptimalMemOpType(
+ const MemOp &Op, const AttributeList &FuncAttributes,
+ bool PreferIntScalar) const {
return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 0046078f82..1f512d0272 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -493,8 +493,7 @@ public:
findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
const MemOp &Op, unsigned DstAS, unsigned SrcAS,
const AttributeList &FuncAttributes) const override;
- EVT getOptimalMemOpType(const MemOp &Op,
- const AttributeList &FuncAttributes,
+ EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
bool PreferIntScalar) const override;
bool isTruncateFree(Type *, Type *) const override;
bool isTruncateFree(EVT, EVT) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 75a8fd2407..b66ca190fc 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -279,9 +279,9 @@ uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
/// target-independent logic.
/// For vector ops we check that the overall size isn't larger than our
/// preferred vector width.
-EVT X86TargetLowering::getOptimalMemOpType(
- const MemOp &Op, const AttributeList &FuncAttributes,
- bool PreferIntScalar) const {
+EVT X86TargetLowering::getOptimalMemOpType(const MemOp &Op,
+ const AttributeList &FuncAttributes,
+ bool PreferIntScalar) const {
if (!PreferIntScalar) {
if (Op.size() >= 16 &&
(!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
```
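For readers skimming the diff: the TargetLowering.cpp hunk computes WantIntScalar from useSoftFloat() and the noimplicitfloat function attribute, and the new header comment documents that a float/vector result from the target is discarded when that hint is set. A minimal caller-side sketch of the documented contract follows; the discard condition is an assumption inferred from the header comment, not code copied from the patch.

```cpp
// Sketch of the fallback described in the TargetLowering.h comment.
// WantIntScalar mirrors the TargetLowering.cpp hunk above; the discard
// step is inferred from "proceed as if EVT::Other had been returned".
bool WantIntScalar =
    useSoftFloat() || FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
EVT VT = getOptimalMemOpType(Op, FuncAttributes, WantIntScalar);
if (WantIntScalar && (VT.isFloatingPoint() || VT.isVector()))
  VT = MVT::Other; // Soft-float callers ignore float/vector suggestions.
```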
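A target override can also honor the hint up front instead of relying on the caller's fallback. The sketch below is illustrative only: HypotheticalTargetLowering and its Subtarget are made up, the vector path echoes the SystemZ override in the diff, and the integer fallback mirrors the BPF one.

```cpp
// Illustrative only: not a real target. Returns a vector type for large
// copies when float/vector code is acceptable, and plain integer scalars
// when the caller asked for them (soft float or noimplicitfloat).
EVT HypotheticalTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes,
    bool PreferIntScalar) const {
  if (!PreferIntScalar && Subtarget.hasVector() && Op.size() >= 16)
    return MVT::v2i64; // Wide vector copies, as in the SystemZ override.
  return Op.size() >= 8 ? MVT::i64 : MVT::i32; // As in the BPF override.
}
```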
https://github.com/llvm/llvm-project/pull/107022