[llvm] [IntrinsicInst] Remove MemCpyInlineInst and MemSetInlineInst [nfc] (PR #138568)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Mon May 5 12:45:05 PDT 2025
https://github.com/preames created https://github.com/llvm/llvm-project/pull/138568
I'm looking for ways to simplify the Mem*Inst class structure, and these two seem to have fairly minimal justification, so let's remove them.
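The patch below folds the behavior of the removed subclasses into a new MemIntrinsic::isForceInlined() predicate. As a rough sketch of the caller-side pattern after this change (the surrounding names here are hypothetical; only isForceInlined() comes from the patch), call sites that previously dispatched on the subclass would now look roughly like:

  // MI is some MemIntrinsic-derived value at a call site (hypothetical).
  if (auto *MCI = dyn_cast<MemCpyInst>(MI)) {
    if (MCI->isForceInlined()) {
      // llvm.memcpy.inline: must never be lowered to a libcall.
    } else {
      // plain llvm.memcpy: libcall lowering remains legal.
    }
  }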
From af2703e0f234388ed64892a8c1bfbdc347db7f53 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Mon, 5 May 2025 11:18:16 -0700
Subject: [PATCH] [IntrinsicInst] Remove MemCpyInlineInst and MemSetInlineInst
[nfc]
I'm looking for ways to simplify the Mem*Inst class structure, and these
two seem to have fairly minimal justification, so let's remove them.
---
llvm/include/llvm/IR/InstVisitor.h | 10 +++---
llvm/include/llvm/IR/IntrinsicInst.h | 34 ++++++-------------
llvm/lib/Analysis/Lint.cpp | 10 ++----
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 4 ---
llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp | 4 +--
.../SelectionDAG/SelectionDAGBuilder.cpp | 30 +++++-----------
llvm/lib/IR/IRBuilder.cpp | 2 +-
.../Transforms/Scalar/InferAddressSpaces.cpp | 22 ++++++------
.../Transforms/Scalar/LoopIdiomRecognize.cpp | 4 +--
.../lib/Transforms/Scalar/MemCpyOptimizer.cpp | 27 ++++++++-------
10 files changed, 55 insertions(+), 92 deletions(-)
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 5fc6fbfd0f28e..b4eb729c7ce38 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -207,12 +207,10 @@ class InstVisitor {
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
- RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
RetTy visitMemSetPatternInst(MemSetPatternInst &I) {
DELEGATE(IntrinsicInst);
}
RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
- RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
@@ -291,13 +289,13 @@ class InstVisitor {
case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
case Intrinsic::dbg_label: DELEGATE(DbgLabelInst);
- case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memcpy:
case Intrinsic::memcpy_inline:
- DELEGATE(MemCpyInlineInst);
+ DELEGATE(MemCpyInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
- case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::memset:
case Intrinsic::memset_inline:
- DELEGATE(MemSetInlineInst);
+ DELEGATE(MemSetInst);
case Intrinsic::experimental_memset_pattern:
DELEGATE(MemSetPatternInst);
case Intrinsic::vastart: DELEGATE(VAStartInst);
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 93750d6e3845e..48b3067266125 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1215,6 +1215,16 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
+ bool isForceInlined() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::memset_inline:
+ case Intrinsic::memcpy_inline:
+ return true;
+ default:
+ return false;
+ }
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
@@ -1251,18 +1261,6 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
}
};
-/// This class wraps the llvm.memset.inline intrinsic.
-class MemSetInlineInst : public MemSetInst {
-public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset_inline;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
/// This is the base class for llvm.experimental.memset.pattern
class MemSetPatternIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
private:
@@ -1342,18 +1340,6 @@ class MemMoveInst : public MemTransferInst {
}
};
-/// This class wraps the llvm.memcpy.inline intrinsic.
-class MemCpyInlineInst : public MemCpyInst {
-public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy_inline;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
// The common base class for any memset/memmove/memcpy intrinsics;
// whether they be atomic or non-atomic.
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index f05e36e2025d4..7e540ea907893 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -344,19 +344,13 @@ void Lint::visitCallBase(CallBase &I) {
MMI->getSourceAlign(), nullptr, MemRef::Read);
break;
}
- case Intrinsic::memset: {
+ case Intrinsic::memset:
+ case Intrinsic::memset_inline: {
MemSetInst *MSI = cast<MemSetInst>(&I);
visitMemoryReference(I, MemoryLocation::getForDest(MSI),
MSI->getDestAlign(), nullptr, MemRef::Write);
break;
}
- case Intrinsic::memset_inline: {
- MemSetInlineInst *MSII = cast<MemSetInlineInst>(&I);
- visitMemoryReference(I, MemoryLocation::getForDest(MSII),
- MSII->getDestAlign(), nullptr, MemRef::Write);
- break;
- }
-
case Intrinsic::vastart:
// vastart in non-varargs function is rejected by the verifier
visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 908524313030f..b453af1b7a641 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1734,10 +1734,6 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
DstAlign = MCI->getDestAlign().valueOrOne();
SrcAlign = MCI->getSourceAlign().valueOrOne();
CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
- } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
- DstAlign = MCI->getDestAlign().valueOrOne();
- SrcAlign = MCI->getSourceAlign().valueOrOne();
- CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
} else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
DstAlign = MMI->getDestAlign().valueOrOne();
SrcAlign = MMI->getSourceAlign().valueOrOne();
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 9dc1764b49e46..1c2912358dcb3 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -319,7 +319,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
// Only expand llvm.memcpy.inline with non-constant length in this
// codepath, leaving the current SelectionDAG expansion for constant
// length memcpy intrinsics undisturbed.
- auto *Memcpy = cast<MemCpyInlineInst>(Inst);
+ auto *Memcpy = cast<MemCpyInst>(Inst);
if (isa<ConstantInt>(Memcpy->getLength()))
break;
@@ -367,7 +367,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
// Only expand llvm.memset.inline with non-constant length in this
// codepath, leaving the current SelectionDAG expansion for constant
// length memset intrinsics undisturbed.
- auto *Memset = cast<MemSetInlineInst>(Inst);
+ auto *Memset = cast<MemSetInst>(Inst);
if (isa<ConstantInt>(Memset->getLength()))
break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 881b1536a131f..9c0d0cd663f9d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6483,7 +6483,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::memcpy_inline: {
- const auto &MCI = cast<MemCpyInlineInst>(I);
+ const auto &MCI = cast<MemCpyInst>(I);
SDValue Dst = getValue(I.getArgOperand(0));
SDValue Src = getValue(I.getArgOperand(1));
SDValue Size = getValue(I.getArgOperand(2));
@@ -6503,35 +6503,21 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
updateDAGForMaybeTailCall(MC);
return;
}
- case Intrinsic::memset: {
- const auto &MSI = cast<MemSetInst>(I);
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- SDValue Op3 = getValue(I.getArgOperand(2));
- // @llvm.memset defines 0 and 1 to both mean no alignment.
- Align Alignment = MSI.getDestAlign().valueOrOne();
- bool isVol = MSI.isVolatile();
- SDValue Root = isVol ? getRoot() : getMemoryRoot();
- SDValue MS = DAG.getMemset(
- Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
- &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
- updateDAGForMaybeTailCall(MS);
- return;
- }
+ case Intrinsic::memset:
case Intrinsic::memset_inline: {
- const auto &MSII = cast<MemSetInlineInst>(I);
+ const auto &MSII = cast<MemSetInst>(I);
SDValue Dst = getValue(I.getArgOperand(0));
SDValue Value = getValue(I.getArgOperand(1));
SDValue Size = getValue(I.getArgOperand(2));
- assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
+ assert((!MSII.isForceInlined() || isa<ConstantSDNode>(Size)) &&
+ "memset_inline needs constant size");
// @llvm.memset defines 0 and 1 to both mean no alignment.
Align DstAlign = MSII.getDestAlign().valueOrOne();
bool isVol = MSII.isVolatile();
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
- /* AlwaysInline */ true, &I,
- MachinePointerInfo(I.getArgOperand(0)),
- I.getAAMetadata());
+ SDValue MC = DAG.getMemset(
+ Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(),
+ &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
updateDAGForMaybeTailCall(MC);
return;
}
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index b448c0372eb0e..089bd997bc058 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -203,7 +203,7 @@ CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
if (DstAlign)
- cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
+ cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
// Set the TBAA info if present.
if (TBAATag)
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index b65a08be75640..0087d037f8cf2 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1142,18 +1142,18 @@ static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
if (Dest == OldV)
Dest = NewV;
- if (isa<MemCpyInlineInst>(MTI)) {
+ if (auto *MCI = dyn_cast<MemCpyInst>(MTI)) {
MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
- B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
- MTI->getSourceAlign(), MTI->getLength(),
- false, // isVolatile
- TBAA, TBAAStruct, ScopeMD, NoAliasMD);
- } else if (isa<MemCpyInst>(MTI)) {
- MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
- B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
- MTI->getLength(),
- false, // isVolatile
- TBAA, TBAAStruct, ScopeMD, NoAliasMD);
+ if (MCI->isForceInlined())
+ B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
+ MTI->getSourceAlign(), MTI->getLength(),
+ false, // isVolatile
+ TBAA, TBAAStruct, ScopeMD, NoAliasMD);
+ else
+ B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
+ MTI->getLength(),
+ false, // isVolatile
+ TBAA, TBAAStruct, ScopeMD, NoAliasMD);
} else {
assert(isa<MemMoveInst>(MTI));
B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index c9942b29a6f43..9c41c2798418c 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -780,7 +780,7 @@ bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI,
return false;
// If we're not allowed to hack on memcpy, we fail.
- if ((!HasMemcpy && !isa<MemCpyInlineInst>(MCI)) || DisableLIRP::Memcpy)
+ if ((!HasMemcpy && !MCI->isForceInlined()) || DisableLIRP::Memcpy)
return false;
Value *Dest = MCI->getDest();
@@ -1267,7 +1267,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to
// conservatively bail here, since otherwise we may have to transform
// llvm.memcpy.inline into llvm.memcpy which is illegal.
- if (isa<MemCpyInlineInst>(TheStore))
+ if (auto *MCI = dyn_cast<MemCpyInst>(TheStore); MCI && MCI->isForceInlined())
return false;
// The trip count of the loop and the base pointer of the addrec SCEV is
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index d8884d34c7bb9..9bb5eb577303a 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1212,7 +1212,7 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
// Don't convert llvm.memcpy.inline into memmove because memmove can be
// lowered as a call, and that is not allowed for llvm.memcpy.inline (and
// there is no inline version of llvm.memmove)
- if (isa<MemCpyInlineInst>(M))
+ if (auto *MCI = dyn_cast<MemCpyInst>(M); MCI && MCI->isForceInlined())
return false;
UseMemMove = true;
}
@@ -1229,17 +1229,20 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
NewM =
Builder.CreateMemMove(M->getDest(), M->getDestAlign(), CopySource,
CopySourceAlign, M->getLength(), M->isVolatile());
- else if (isa<MemCpyInlineInst>(M)) {
- // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
- // never allowed since that would allow the latter to be lowered as a call
- // to an external function.
- NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
- CopySource, CopySourceAlign,
- M->getLength(), M->isVolatile());
- } else
- NewM =
- Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
- CopySourceAlign, M->getLength(), M->isVolatile());
+ else if (auto *MCI = dyn_cast<MemCpyInst>(M)) {
+ if (MCI->isForceInlined())
+ // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
+ // never allowed since that would allow the latter to be lowered as a call
+ // to an external function.
+ NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
+ CopySource, CopySourceAlign,
+ M->getLength(), M->isVolatile());
+ else
+ NewM = Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
+ CopySourceAlign, M->getLength(),
+ M->isVolatile());
+ }
+
NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID);
assert(isa<MemoryDef>(MSSA->getMemoryAccess(M)));