[llvm] 9aa1428 - [InstSimplify] Treat invariant group insts as bitcasts for load operands

Arthur Eubanks via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jun 15 12:59:58 PDT 2021


Author: Arthur Eubanks
Date: 2021-06-15T12:59:43-07:00
New Revision: 9aa1428174ae5d0515f49d50a483a5517f4df2f4

URL: https://github.com/llvm/llvm-project/commit/9aa1428174ae5d0515f49d50a483a5517f4df2f4
DIFF: https://github.com/llvm/llvm-project/commit/9aa1428174ae5d0515f49d50a483a5517f4df2f4.diff

LOG: [InstSimplify] Treat invariant group insts as bitcasts for load operands

We can look through invariant group intrinsics for the purposes of
simplifying the result of a load.

Since intrinsic calls can't be constants, and we don't want to
completely rewrite load constant folding, we reconstruct the load
operand as a constant: GEPs and bitcasts become the corresponding
constant expressions, and invariant group intrinsics are treated as
bitcasts.
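As an illustration, here is a minimal IR sketch of the kind of chain this
lets InstSimplify fold. It mirrors the updated invariant.group-load.ll test
below; the definition of @A and the function name are assumptions, since the
diff only shows the test's CHECK lines (which expect the loaded value 3):

; Hypothetical definition of @A; only the fact that the field at offset 8
; holds 3 is implied by the test's CHECK lines.
@A = constant { i64, i64 } { i64 0, i64 3 }

declare i8* @llvm.strip.invariant.group.p0i8(i8*)

define i64 @load_via_invariant_group() {
  %p = bitcast { i64, i64 }* @A to i8*
  %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
  %b = getelementptr i8, i8* %a, i32 8
  %c = bitcast i8* %b to i64*
  ; The bitcasts, the GEP, and the invariant group call are all rebuilt as
  ; constant expressions (the call as a bitcast), so on a layout where the
  ; second i64 sits at offset 8 the load folds and the function simplifies
  ; to "ret i64 3".
  %d = load i64, i64* %c
  ret i64 %d
}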

Relanding with a check for self-referential values.
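A hypothetical minimal shape of what that check guards against (the new
regression test reaches the same situation indirectly through jump-threading;
the function, block, and value names here are made up):

define i8 @self_reference_in_dead_code() {
entry:
  ret i8 0

dead:                                   ; unreachable: no predecessors
  ; Self-referential values like %p can occur in unreachable code, where
  ; dominance is not enforced. Without the Visited set,
  ; ConstructLoadOperandConstant would keep re-pushing %p onto its worklist
  ; while trying to simplify the load below.
  %p = getelementptr i8, i8* %p, i64 1
  %v = load i8, i8* %p
  ret i8 %v
}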

Reviewed By: lebedev.ri

Differential Revision: https://reviews.llvm.org/D101103

Added: 
    llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll

Modified: 
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/test/Transforms/InstSimplify/invariant.group-load.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 2e591bb8df5c..7fb7a394e86a 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -18,6 +18,7 @@
 
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
@@ -5823,6 +5824,78 @@ Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
   return ::SimplifyFreezeInst(Op0, Q);
 }
 
+static Constant *ConstructLoadOperandConstant(Value *Op) {
+  SmallVector<Value *, 4> Worklist;
+  // Invalid IR in unreachable code may contain self-referential values. Don't infinitely loop.
+  SmallPtrSet<Value *, 4> Visited;
+  Worklist.push_back(Op);
+  while (true) {
+    Value *CurOp = Worklist.back();
+    if (!Visited.insert(CurOp).second)
+      return nullptr;
+    if (isa<Constant>(CurOp))
+      break;
+    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
+      Worklist.push_back(BC->getOperand(0));
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
+        if (!isa<Constant>(GEP->getOperand(I)))
+          return nullptr;
+      }
+      Worklist.push_back(GEP->getOperand(0));
+    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
+      if (II->isLaunderOrStripInvariantGroup())
+        Worklist.push_back(II->getOperand(0));
+      else
+        return nullptr;
+    } else {
+      return nullptr;
+    }
+  }
+
+  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
+  while (!Worklist.empty()) {
+    Value *CurOp = Worklist.pop_back_val();
+    if (isa<BitCastOperator>(CurOp)) {
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      SmallVector<Constant *> Idxs;
+      Idxs.reserve(GEP->getNumOperands() - 1);
+      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
+      }
+      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
+                                             Idxs, GEP->isInBounds(),
+                                             GEP->getInRangeIndex());
+    } else {
+      assert(isa<IntrinsicInst>(CurOp) &&
+             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
+             "expected invariant group intrinsic");
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    }
+  }
+  return NewOp;
+}
+
+static Value *SimplifyLoadInst(LoadInst *LI, const SimplifyQuery &Q) {
+  if (LI->isVolatile())
+    return nullptr;
+
+  if (auto *C = ConstantFoldInstruction(LI, Q.DL))
+    return C;
+
+  // The following only catches more cases than ConstantFoldInstruction() if the
+  // load operand wasn't a constant. Specifically, invariant.group intrinsics.
+  if (isa<Constant>(LI->getPointerOperand()))
+    return nullptr;
+
+  if (auto *C = dyn_cast_or_null<Constant>(
+          ConstructLoadOperandConstant(LI->getPointerOperand())))
+    return ConstantFoldLoadFromConstPtr(C, LI->getType(), Q.DL);
+
+  return nullptr;
+}
+
 /// See if we can compute a simplified version of this instruction.
 /// If not, this returns null.
 
@@ -5979,6 +6052,9 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
     // No simplifications for Alloca and it can't be constant folded.
     Result = nullptr;
     break;
+  case Instruction::Load:
+    Result = SimplifyLoadInst(cast<LoadInst>(I), Q);
+    break;
   }
 
   /// If called on unreachable code, the above logic may report that the

diff --git a/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll b/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
new file mode 100644
index 000000000000..6a4fe2686117
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
@@ -0,0 +1,51 @@
+; RUN: opt -passes=jump-threading -S < %s | FileCheck %s
+; CHECK: @main
+
+%struct.wobble = type { i8 }
+
+define i32 @main() local_unnamed_addr personality i8* undef {
+bb12:
+  br i1 false, label %bb13, label %bb28
+
+bb13:                                             ; preds = %bb12
+  br label %bb14
+
+bb14:                                             ; preds = %bb26, %bb13
+  %tmp15 = phi i8* [ %tmp27, %bb26 ], [ undef, %bb13 ]
+  %tmp16 = icmp slt i32 5, undef
+  %tmp17 = select i1 false, i1 true, i1 %tmp16
+  br label %bb18
+
+bb18:                                             ; preds = %bb14
+  br i1 %tmp17, label %bb19, label %bb21
+
+bb19:                                             ; preds = %bb18
+  %tmp20 = or i32 undef, 4
+  br label %bb21
+
+bb21:                                             ; preds = %bb19, %bb18
+  %tmp22 = load i8, i8* %tmp15, align 1
+  br label %bb23
+
+bb23:                                             ; preds = %bb21
+  br i1 %tmp17, label %bb24, label %bb25
+
+bb24:                                             ; preds = %bb23
+  br label %bb25
+
+bb25:                                             ; preds = %bb24, %bb23
+  invoke void undef(%struct.wobble* undef, i32 0, i32 undef, i8 %tmp22)
+          to label %bb26 unwind label %bb33
+
+bb26:                                             ; preds = %bb25
+  %tmp27 = getelementptr inbounds i8, i8* %tmp15, i64 1
+  br label %bb14
+
+bb28:                                             ; preds = %bb12
+  unreachable
+
+bb33:                                             ; preds = %bb25
+  %tmp34 = landingpad { i8*, i32 }
+          cleanup
+  unreachable
+}

diff --git a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
index 72cb36d2eee6..f1ee1528e8ac 100644
--- a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
+++ b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
@@ -9,11 +9,7 @@ declare i8* @llvm.launder.invariant.group.p0i8(i8* %p)
 
 define i64 @f() {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT:    ret i64 [[D]]
+; CHECK-NEXT:    ret i64 3
 ;
   %p = bitcast { i64, i64 }* @A to i8*
   %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
@@ -25,11 +21,7 @@ define i64 @f() {
 
 define i64 @g() {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT:    ret i64 [[D]]
+; CHECK-NEXT:    ret i64 3
 ;
   %p = bitcast { i64, i64 }* @A to i8*
   %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)


        

