[llvm] 26044c6 - [InstSimplify] Treat invariant group insts as bitcasts for load operands
Arthur Eubanks via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 1 16:33:36 PDT 2021
Author: Arthur Eubanks
Date: 2021-06-01T16:33:06-07:00
New Revision: 26044c6a54de3e03c73c5515702b95acdb0b7f22
URL: https://github.com/llvm/llvm-project/commit/26044c6a54de3e03c73c5515702b95acdb0b7f22
DIFF: https://github.com/llvm/llvm-project/commit/26044c6a54de3e03c73c5515702b95acdb0b7f22.diff
LOG: [InstSimplify] Treat invariant group insts as bitcasts for load operands
We can look through invariant group intrinsics for the purposes of
simplifying the result of a load.
Since intrinsic calls can't be constants, and we don't want to
completely rewrite load constant folding, we instead convert the load's
pointer operand into an equivalent constant. GEPs and bitcasts map
directly onto the corresponding constant expressions; invariant group
intrinsics are treated as bitcasts.
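For illustration, a small standalone IR sketch of the kind of pattern this
enables (the global @G and its initializer are made up for the example;
they mirror the updated test below):

  @G = constant { i64, i64 } { i64 2, i64 3 }

  declare i8* @llvm.strip.invariant.group.p0i8(i8*)

  define i64 @example() {
    %p = bitcast { i64, i64 }* @G to i8*
    %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
    %b = getelementptr i8, i8* %a, i32 8
    %c = bitcast i8* %b to i64*
    %d = load i64, i64* %c    ; now simplifies to the constant 3
    ret i64 %d
  }

Even though the load's pointer operand is not itself a constant, walking
back through the bitcast, the GEP, and the strip.invariant.group call
reaches the constant global, so the load can be folded.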
Reviewed By: lebedev.ri
Differential Revision: https://reviews.llvm.org/D101103
Added:
Modified:
llvm/lib/Analysis/InstructionSimplify.cpp
llvm/test/Transforms/InstSimplify/invariant.group-load.ll
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index e6baed1779cd1..c6dabf8870fd0 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5819,6 +5819,74 @@ Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
return ::SimplifyFreezeInst(Op0, Q);
}
+static Constant *ConstructLoadOperandConstant(Value *Op) {
+  SmallVector<Value *, 4> Worklist;
+  Worklist.push_back(Op);
+  while (true) {
+    Value *CurOp = Worklist.back();
+    if (isa<Constant>(CurOp))
+      break;
+    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
+      Worklist.push_back(BC->getOperand(0));
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
+        if (!isa<Constant>(GEP->getOperand(I)))
+          return nullptr;
+      }
+      Worklist.push_back(GEP->getOperand(0));
+    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
+      if (II->isLaunderOrStripInvariantGroup())
+        Worklist.push_back(II->getOperand(0));
+      else
+        return nullptr;
+    } else {
+      return nullptr;
+    }
+  }
+
+  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
+  while (!Worklist.empty()) {
+    Value *CurOp = Worklist.pop_back_val();
+    if (isa<BitCastOperator>(CurOp)) {
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      SmallVector<Constant *> Idxs;
+      Idxs.reserve(GEP->getNumOperands() - 1);
+      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
+      }
+      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
+                                             Idxs, GEP->isInBounds(),
+                                             GEP->getInRangeIndex());
+    } else {
+      assert(isa<IntrinsicInst>(CurOp) &&
+             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
+             "expected invariant group intrinsic");
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    }
+  }
+  return NewOp;
+}
+
+static Value *SimplifyLoadInst(LoadInst *LI, const SimplifyQuery &Q) {
+  if (LI->isVolatile())
+    return nullptr;
+
+  if (auto *C = ConstantFoldInstruction(LI, Q.DL))
+    return C;
+
+  // The following only catches more cases than ConstantFoldInstruction() if the
+  // load operand wasn't a constant. Specifically, invariant.group intrinsics.
+  if (isa<Constant>(LI->getPointerOperand()))
+    return nullptr;
+
+  if (auto *C = dyn_cast_or_null<Constant>(
+          ConstructLoadOperandConstant(LI->getPointerOperand())))
+    return ConstantFoldLoadFromConstPtr(C, LI->getType(), Q.DL);
+
+  return nullptr;
+}
+
/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
@@ -5975,6 +6043,9 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
// No simplifications for Alloca and it can't be constant folded.
Result = nullptr;
break;
+  case Instruction::Load:
+    Result = SimplifyLoadInst(cast<LoadInst>(I), Q);
+    break;
}
/// If called on unreachable code, the above logic may report that the
diff --git a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
index 72cb36d2eee6c..f1ee1528e8acf 100644
--- a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
+++ b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
@@ -9,11 +9,7 @@ declare i8* @llvm.launder.invariant.group.p0i8(i8* %p)
define i64 @f() {
; CHECK-LABEL: @f(
-; CHECK-NEXT: [[A:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT: [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT: [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT: [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT: ret i64 [[D]]
+; CHECK-NEXT: ret i64 3
;
%p = bitcast { i64, i64 }* @A to i8*
%a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
@@ -25,11 +21,7 @@ define i64 @f() {
define i64 @g() {
; CHECK-LABEL: @g(
-; CHECK-NEXT: [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT: [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT: [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT: [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT: ret i64 [[D]]
+; CHECK-NEXT: ret i64 3
;
%p = bitcast { i64, i64 }* @A to i8*
%a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)