[llvm] 2568e52 - [X86,SimplifyCFG] Support hoisting load/store with conditional faulting (Part II) (#108812)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 24 23:19:32 PST 2024
Author: Phoebe Wang
Date: 2024-11-25T15:19:28+08:00
New Revision: 2568e52a733a9767014e0d8ccb685553479a3031
URL: https://github.com/llvm/llvm-project/commit/2568e52a733a9767014e0d8ccb685553479a3031
DIFF: https://github.com/llvm/llvm-project/commit/2568e52a733a9767014e0d8ccb685553479a3031.diff
LOG: [X86,SimplifyCFG] Support hoisting load/store with conditional faulting (Part II) (#108812)
This is a follow-up of #96878 to support hoisting loads/stores from basic
blocks that share the same predecessor, when the loads/stores are the only
instructions in those blocks and the branch is unpredictable, e.g.:
```
void test (int a, int *c, int *d) {
  if (a)
    *c = a;
  else
    *d = a;
}
```
Added:
Modified:
llvm/lib/Transforms/Utils/SimplifyCFG.cpp
llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 1991ec82d1e1e4..b664bde5d320a1 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1662,21 +1662,43 @@ static bool areIdenticalUpToCommutativity(const Instruction *I1,
/// \endcode
///
/// So we need to turn hoisted load/store into cload/cstore.
+///
+/// \param BI The branch instruction.
+/// \param SpeculatedConditionalLoadsStores The load/store instructions that
+/// will be speculated.
+/// \param Invert indicates whether the FalseBB is speculated. Only used in a
+/// triangle CFG.
static void hoistConditionalLoadsStores(
BranchInst *BI,
SmallVectorImpl<Instruction *> &SpeculatedConditionalLoadsStores,
- bool Invert) {
+ std::optional<bool> Invert) {
auto &Context = BI->getParent()->getContext();
auto *VCondTy = FixedVectorType::get(Type::getInt1Ty(Context), 1);
auto *Cond = BI->getOperand(0);
// Construct the condition if needed.
BasicBlock *BB = BI->getParent();
- IRBuilder<> Builder(SpeculatedConditionalLoadsStores.back());
- Value *Mask = Builder.CreateBitCast(
- Invert ? Builder.CreateXor(Cond, ConstantInt::getTrue(Context)) : Cond,
- VCondTy);
+ IRBuilder<> Builder(
+ Invert.has_value() ? SpeculatedConditionalLoadsStores.back() : BI);
+ Value *Mask = nullptr;
+ Value *MaskFalse = nullptr;
+ Value *MaskTrue = nullptr;
+ if (Invert.has_value()) {
+ Mask = Builder.CreateBitCast(
+ *Invert ? Builder.CreateXor(Cond, ConstantInt::getTrue(Context)) : Cond,
+ VCondTy);
+ } else {
+ MaskFalse = Builder.CreateBitCast(
+ Builder.CreateXor(Cond, ConstantInt::getTrue(Context)), VCondTy);
+ MaskTrue = Builder.CreateBitCast(Cond, VCondTy);
+ }
+ auto PeekThroughBitcasts = [](Value *V) {
+ while (auto *BitCast = dyn_cast<BitCastInst>(V))
+ V = BitCast->getOperand(0);
+ return V;
+ };
for (auto *I : SpeculatedConditionalLoadsStores) {
- IRBuilder<> Builder(I);
+ IRBuilder<> Builder(Invert.has_value() ? I : BI);
+ if (!Invert.has_value())
+ Mask = I->getParent() == BI->getSuccessor(0) ? MaskTrue : MaskFalse;
// We currently assume conditional faulting load/store is supported for
// scalar types only when creating new instructions. This can be easily
// extended for vector types in the future.
@@ -1688,12 +1710,14 @@ static void hoistConditionalLoadsStores(
auto *Ty = I->getType();
PHINode *PN = nullptr;
Value *PassThru = nullptr;
- for (User *U : I->users())
- if ((PN = dyn_cast<PHINode>(U))) {
- PassThru = Builder.CreateBitCast(PN->getIncomingValueForBlock(BB),
- FixedVectorType::get(Ty, 1));
- break;
- }
+ if (Invert.has_value())
+ for (User *U : I->users())
+ if ((PN = dyn_cast<PHINode>(U))) {
+ PassThru = Builder.CreateBitCast(
+ PeekThroughBitcasts(PN->getIncomingValueForBlock(BB)),
+ FixedVectorType::get(Ty, 1));
+ break;
+ }
MaskedLoadStore = Builder.CreateMaskedLoad(
FixedVectorType::get(Ty, 1), Op0, LI->getAlign(), Mask, PassThru);
Value *NewLoadStore = Builder.CreateBitCast(MaskedLoadStore, Ty);
@@ -1702,8 +1726,8 @@ static void hoistConditionalLoadsStores(
I->replaceAllUsesWith(NewLoadStore);
} else {
// Handle Store.
- auto *StoredVal =
- Builder.CreateBitCast(Op0, FixedVectorType::get(Op0->getType(), 1));
+ auto *StoredVal = Builder.CreateBitCast(
+ PeekThroughBitcasts(Op0), FixedVectorType::get(Op0->getType(), 1));
MaskedLoadStore = Builder.CreateMaskedStore(
StoredVal, I->getOperand(1), cast<StoreInst>(I)->getAlign(), Mask);
}
@@ -3155,7 +3179,8 @@ static bool validateAndCostRequiredSelects(BasicBlock *BB, BasicBlock *ThenBB,
return HaveRewritablePHIs;
}
-static bool isProfitableToSpeculate(const BranchInst *BI, bool Invert,
+static bool isProfitableToSpeculate(const BranchInst *BI,
+ std::optional<bool> Invert,
const TargetTransformInfo &TTI) {
// If the branch is non-unpredictable, and is predicted to *not* branch to
// the `then` block, then avoid speculating it.
@@ -3166,7 +3191,10 @@ static bool isProfitableToSpeculate(const BranchInst *BI, bool Invert,
if (!extractBranchWeights(*BI, TWeight, FWeight) || (TWeight + FWeight) == 0)
return true;
- uint64_t EndWeight = Invert ? TWeight : FWeight;
+ if (!Invert.has_value())
+ return false;
+
+ uint64_t EndWeight = *Invert ? TWeight : FWeight;
BranchProbability BIEndProb =
BranchProbability::getBranchProbability(EndWeight, TWeight + FWeight);
BranchProbability Likely = TTI.getPredictableBranchThreshold();
@@ -8034,6 +8062,35 @@ bool SimplifyCFGOpt::simplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (HoistCommon &&
hoistCommonCodeFromSuccessors(BI, !Options.HoistCommonInsts))
return requestResimplify();
+
+ if (BI && HoistLoadsStoresWithCondFaulting &&
+ Options.HoistLoadsStoresWithCondFaulting &&
+ isProfitableToSpeculate(BI, std::nullopt, TTI)) {
+ SmallVector<Instruction *, 2> SpeculatedConditionalLoadsStores;
+ auto CanSpeculateConditionalLoadsStores = [&]() {
+ for (auto *Succ : successors(BB)) {
+ for (Instruction &I : *Succ) {
+ if (I.isTerminator()) {
+ if (I.getNumSuccessors() > 1)
+ return false;
+ continue;
+ } else if (!isSafeCheapLoadStore(&I, TTI) ||
+ SpeculatedConditionalLoadsStores.size() ==
+ HoistLoadsStoresWithCondFaultingThreshold) {
+ return false;
+ }
+ SpeculatedConditionalLoadsStores.push_back(&I);
+ }
+ }
+ return !SpeculatedConditionalLoadsStores.empty();
+ };
+
+ if (CanSpeculateConditionalLoadsStores()) {
+ hoistConditionalLoadsStores(BI, SpeculatedConditionalLoadsStores,
+ std::nullopt);
+ return requestResimplify();
+ }
+ }
} else {
// If Successor #1 has multiple preds, we may be able to conditionally
// execute Successor #0 if it branches to Successor #1.
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll b/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
index 405afd5969a413..5c9058b4823202 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
@@ -276,21 +276,19 @@ if.false: ; preds = %if.true, %entry
}
;; Both successor 0 and successor 1 have a single predecessor.
-;; TODO: Support transform for this case.
-define void @single_predecessor(ptr %p, ptr %q, i32 %a) {
+define i32 @single_predecessor(ptr %p, ptr %q, i32 %a) {
; CHECK-LABEL: @single_predecessor(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[A:%.*]], 0
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: common.ret:
-; CHECK-NEXT: ret void
-; CHECK: if.end:
-; CHECK-NEXT: store i32 1, ptr [[Q:%.*]], align 4
-; CHECK-NEXT: br label [[COMMON_RET:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[Q]], align 4
-; CHECK-NEXT: store i32 [[TMP0]], ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[COMMON_RET]]
+; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[TOBOOL]], true
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i1 [[TMP0]] to <1 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i1 [[TOBOOL]] to <1 x i1>
+; CHECK-NEXT: call void @llvm.masked.store.v1i32.p0(<1 x i32> splat (i32 1), ptr [[Q:%.*]], i32 4, <1 x i1> [[TMP2]])
+; CHECK-NEXT: [[TMP3:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[Q]], i32 4, <1 x i1> [[TMP1]], <1 x i32> poison)
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <1 x i32> [[TMP3]] to i32
+; CHECK-NEXT: call void @llvm.masked.store.v1i32.p0(<1 x i32> [[TMP3]], ptr [[P:%.*]], i32 4, <1 x i1> [[TMP1]])
+; CHECK-NEXT: [[DOT:%.*]] = select i1 [[TOBOOL]], i32 2, i32 3
+; CHECK-NEXT: ret i32 [[DOT]]
;
entry:
%tobool = icmp ne i32 %a, 0
@@ -298,12 +296,12 @@ entry:
if.end:
store i32 1, ptr %q
- ret void
+ ret i32 2
if.then:
%0 = load i32, ptr %q
store i32 %0, ptr %p
- ret void
+ ret i32 3
}
;; Hoist 6 stores.
@@ -759,6 +757,44 @@ if.true:
ret i32 %res
}
+;; Don't transform if either BB has multiple successors.
+define i32 @not_multi_successors(i1 %c1, i32 %c2, ptr %p) {
+; CHECK-LABEL: @not_multi_successors(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C1:%.*]], label [[ENTRY_IF:%.*]], label [[COMMON_RET:%.*]]
+; CHECK: entry.if:
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: switch i32 [[C2:%.*]], label [[COMMON_RET]] [
+; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
+; CHECK-NEXT: i32 1, label [[SW_BB]]
+; CHECK-NEXT: ]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL]], [[ENTRY_IF]] ], [ 0, [[SW_BB]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: sw.bb:
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+entry:
+ br i1 %c1, label %entry.if, label %entry.else
+
+entry.if: ; preds = %entry
+ %val = load i32, ptr %p, align 4
+ switch i32 %c2, label %return [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb
+ ]
+
+entry.else: ; preds = %entry
+ ret i32 0
+
+sw.bb: ; preds = %entry.if, %entry.if
+ br label %return
+
+return: ; preds = %sw.bb, %entry.if
+ %ret = phi i32 [ %val, %entry.if ], [ 0, %sw.bb ]
+ ret i32 %ret
+}
+
declare i32 @read_memory_only() readonly nounwind willreturn speculatable
!llvm.dbg.cu = !{!0}