[llvm] 75384ec - [InstSimplify] Refactor invariant.group load folding
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 25 01:56:31 PDT 2021
Author: Nikita Popov
Date: 2021-10-25T10:56:25+02:00
New Revision: 75384ecdf8049b6eb8934790f1239c0c6c0a2502
URL: https://github.com/llvm/llvm-project/commit/75384ecdf8049b6eb8934790f1239c0c6c0a2502
DIFF: https://github.com/llvm/llvm-project/commit/75384ecdf8049b6eb8934790f1239c0c6c0a2502.diff
LOG: [InstSimplify] Refactor invariant.group load folding
Currently strip.invariant/launder.invariant are handled by
constructing constant expressions with the intrinsics skipped.
This takes an alternative approach of accumulating the offset
using stripAndAccumulateConstantOffsets(), with a flag to look
through invariant.group intrinsics.
Differential Revision: https://reviews.llvm.org/D112382
Added:
Modified:
llvm/include/llvm/IR/Value.h
llvm/lib/Analysis/InstructionSimplify.cpp
llvm/lib/IR/Value.cpp
llvm/lib/Transforms/IPO/AttributorAttributes.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index 515cfc7c1fa9b..fc2ed00d770f9 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -693,6 +693,9 @@ class Value {
/// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
/// accumulated even if the GEP is not "inbounds".
///
+ /// If \p AllowInvariantGroup is true then this method also looks through
+ /// strip.invariant.group and launder.invariant.group intrinsics.
+ ///
/// If \p ExternalAnalysis is provided it will be used to calculate an offset
/// when an operand of GEP is not constant.
/// For example, for a value \p ExternalAnalysis might try to calculate a
@@ -708,13 +711,15 @@ class Value {
/// is unchanged.
const Value *stripAndAccumulateConstantOffsets(
const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ bool AllowInvariantGroup = false,
function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
nullptr) const;
Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
- bool AllowNonInbounds) {
+ bool AllowNonInbounds,
+ bool AllowInvariantGroup = false) {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
- DL, Offset, AllowNonInbounds));
+ DL, Offset, AllowNonInbounds, AllowInvariantGroup));
}
/// This is a wrapper around stripAndAccumulateConstantOffsets with the
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index e67ba55d395e9..6a27dd7a74dde 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6089,73 +6089,27 @@ Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
return ::SimplifyFreezeInst(Op0, Q);
}
-static Constant *ConstructLoadOperandConstant(Value *Op) {
- SmallVector<Value *, 4> Worklist;
- // Invalid IR in unreachable code may contain self-referential values. Don't infinitely loop.
- SmallPtrSet<Value *, 4> Visited;
- Worklist.push_back(Op);
- while (true) {
- Value *CurOp = Worklist.back();
- if (!Visited.insert(CurOp).second)
- return nullptr;
- if (isa<Constant>(CurOp))
- break;
- if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
- Worklist.push_back(BC->getOperand(0));
- } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
- for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
- if (!isa<Constant>(GEP->getOperand(I)))
- return nullptr;
- }
- Worklist.push_back(GEP->getOperand(0));
- } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
- if (II->isLaunderOrStripInvariantGroup())
- Worklist.push_back(II->getOperand(0));
- else
- return nullptr;
- } else {
- return nullptr;
- }
- }
-
- Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
- while (!Worklist.empty()) {
- Value *CurOp = Worklist.pop_back_val();
- if (isa<BitCastOperator>(CurOp)) {
- NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
- } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
- SmallVector<Constant *> Idxs;
- Idxs.reserve(GEP->getNumOperands() - 1);
- for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
- Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
- }
- NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
- Idxs, GEP->isInBounds(),
- GEP->getInRangeIndex());
- } else {
- assert(isa<IntrinsicInst>(CurOp) &&
- cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
- "expected invariant group intrinsic");
- NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
- }
- }
- return NewOp;
-}
-
static Value *SimplifyLoadInst(LoadInst *LI, Value *PtrOp,
const SimplifyQuery &Q) {
if (LI->isVolatile())
return nullptr;
- // Try to make the load operand a constant, specifically handle
- // invariant.group intrinsics.
+ APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
auto *PtrOpC = dyn_cast<Constant>(PtrOp);
- if (!PtrOpC)
- PtrOpC = ConstructLoadOperandConstant(PtrOp);
+ // Try to convert operand into a constant by stripping offsets while looking
+ // through invariant.group intrinsics. Don't bother if the underlying object
+ // is not constant, as calculating GEP offsets is expensive.
+ if (!PtrOpC && isa<Constant>(getUnderlyingObject(PtrOp))) {
+ PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
Q.DL, Offset, /* AllowNonInbounds */ true,
+ /* AllowInvariantGroup */ true);
+ // Index size may have changed due to address space casts.
+ Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
+ PtrOpC = dyn_cast<Constant>(PtrOp);
+ }
if (PtrOpC)
- return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
-
+ return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Offset, Q.DL);
return nullptr;
}
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 0f11bf6106bd5..4136a9afc9cf0 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -706,6 +706,7 @@ const Value *Value::stripPointerCastsForAliasAnalysis() const {
const Value *Value::stripAndAccumulateConstantOffsets(
const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ bool AllowInvariantGroup,
function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
if (!getType()->isPtrOrPtrVectorTy())
return this;
@@ -765,6 +766,8 @@ const Value *Value::stripAndAccumulateConstantOffsets(
} else if (const auto *Call = dyn_cast<CallBase>(V)) {
if (const Value *RV = Call->getReturnedArgOperand())
V = RV;
+ if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
+ V = Call->getArgOperand(0);
}
assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
} while (Visited.insert(V).second);
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index f8410ae2d27cd..4cf5cad9fdebd 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -410,6 +410,7 @@ const Value *stripAndAccumulateMinimalOffsets(
};
return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
+ /* AllowInvariantGroup */ false,
AttributorAnalysis);
}
More information about the llvm-commits
mailing list