[llvm] [InstCombine] Fold integer stores using llvm.assume (PR #173134)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 19 20:21:19 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-llvm-transforms
Author: Ken Matsui (ken-matsui)
Changes
Fixes #134540, fixes #134992
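With this change, visitStoreInst folds an integer store operand to a constant when computeKnownBits at the store (which takes dominating or guaranteed-to-execute llvm.assume calls into account) proves every bit of the stored value. A minimal before/after sketch of the effect, based on the @assume_store_i32_eq test added in this PR:

```llvm
; Before instcombine: %x is only known to equal 10 through the assume
; that is guaranteed to execute after the store.
@b = external global i32

declare void @llvm.assume(i1)

define void @assume_store_i32_eq(i32 %x) {
  store i32 %x, ptr @b, align 4
  %cmp = icmp eq i32 %x, 10
  call void @llvm.assume(i1 %cmp)
  ret void
}

; After `opt -passes=instcombine -S`, the store operand becomes the constant:
;   store i32 10, ptr @b, align 4
; The assume itself is kept.
```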
---
Full diff: https://github.com/llvm/llvm-project/pull/173134.diff
2 Files Affected:
- (modified) llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (+10)
- (added) llvm/test/Transforms/InstCombine/store-assume-fold.ll (+88)
``````````diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 5bca0bb8846c0..96aaef855a455 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -1535,6 +1535,16 @@ Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
if (Value *V = simplifyNonNullOperand(Ptr, /*HasDereferenceable=*/true))
return replaceOperand(SI, 1, V);
+ // If the stored value is constant at this point (e.g., constrained by a
+ // guaranteed-to-execute llvm.assume), fold the store.
+ if (Val->getType()->isIntegerTy() && !isa<Constant>(Val) &&
+ !isa<PoisonValue>(Val)) {
+ KnownBits Known = computeKnownBits(Val, &SI);
+ if (Known.isConstant())
+ return replaceOperand(
+ SI, 0, ConstantInt::get(SI.getContext(), Known.getConstant()));
+ }
+
return nullptr;
}
diff --git a/llvm/test/Transforms/InstCombine/store-assume-fold.ll b/llvm/test/Transforms/InstCombine/store-assume-fold.ll
new file mode 100644
index 0000000000000..e1c38446f0544
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/store-assume-fold.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=instcombine -S < %s | FileCheck %s
+
+ at a = external global i1
+ at b = external global i32
+
+declare void @llvm.assume(i1)
+
+define void @assume_store_i1(i1 %x) {
+; CHECK-LABEL: define void @assume_store_i1(
+; CHECK-SAME: i1 [[X:%.*]]) {
+; CHECK-NEXT: store i1 true, ptr @a, align 1
+; CHECK-NEXT: call void @llvm.assume(i1 [[X]])
+; CHECK-NEXT: ret void
+;
+ store i1 %x, ptr @a, align 1
+ call void @llvm.assume(i1 %x)
+ ret void
+}
+
+define void @assume_store_i1_not(i1 %x) {
+; CHECK-LABEL: define void @assume_store_i1_not(
+; CHECK-SAME: i1 [[X:%.*]]) {
+; CHECK-NEXT: store i1 false, ptr @a, align 1
+; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[X]], true
+; CHECK-NEXT: call void @llvm.assume(i1 [[NOT]])
+; CHECK-NEXT: ret void
+;
+ store i1 %x, ptr @a, align 1
+ %not = xor i1 %x, true
+ call void @llvm.assume(i1 %not)
+ ret void
+}
+
+define i1 @assume_store_i1_xor(ptr %G) {
+; CHECK-LABEL: define i1 @assume_store_i1_xor(
+; CHECK-SAME: ptr [[G:%.*]]) {
+; CHECK-NEXT: [[L:%.*]] = load i1, ptr [[G]], align 1
+; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[L]], true
+; CHECK-NEXT: store i1 true, ptr @a, align 1
+; CHECK-NEXT: call void @llvm.assume(i1 [[XOR]])
+; CHECK-NEXT: ret i1 [[XOR]]
+;
+ %L = load i1, ptr %G, align 1
+ %xor = xor i1 %L, true
+ store i1 %xor, ptr @a, align 1
+ call void @llvm.assume(i1 %xor)
+ ret i1 %xor
+}
+
+define void @assume_store_i32_eq(i32 %x) {
+; CHECK-LABEL: define void @assume_store_i32_eq(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: store i32 10, ptr @b, align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 10
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret void
+;
+ store i32 %x, ptr @b, align 4
+ %cmp = icmp eq i32 %x, 10
+ call void @llvm.assume(i1 %cmp)
+ ret void
+}
+
+define void @unreachable_implies_false(i8 %x) {
+; CHECK-LABEL: define void @unreachable_implies_false(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[RET:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[X]], 6
+; CHECK-NEXT: store i1 false, ptr @a, align 1
+; CHECK-NEXT: call void @llvm.assume(i1 [[TMP0]])
+; CHECK-NEXT: ret void
+;
+; Original code before `simplifycfg` from #134992:
+; %cmp = icmp ugt i8 %x, 5
+; store i1 %cmp, ptr @a, align 1
+; br i1 %cmp, label %ub, label %ret
+; ret:
+; ret void
+; ub:
+; unreachable
+ret:
+ %cmp = icmp ugt i8 %x, 5
+ store i1 %cmp, ptr @a, align 1
+ %0 = xor i1 %cmp, true
+ call void @llvm.assume(i1 %0)
+ ret void
+}
``````````
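For the unreachable_implies_false case, the assume-of-negated-condition pattern is what SimplifyCFG leaves behind for the reproducer in #134992 (the original code is quoted in the test comment): the branch to an unreachable block is replaced by an assume that the branch condition is false, so known bits at the store prove the stored i1 is false. A sketch of the resulting IR, matching the CHECK lines in the test (the value name below is illustrative; the actual output uses an unnamed %0):

```llvm
; After instcombine: the stored value folds to false, and the negated
; compare (xor of icmp ugt %x, 5 with true) is canonicalized to icmp ult %x, 6.
@a = external global i1

declare void @llvm.assume(i1)

define void @unreachable_implies_false(i8 %x) {
  %cond = icmp ult i8 %x, 6
  store i1 false, ptr @a, align 1
  call void @llvm.assume(i1 %cond)
  ret void
}
```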
https://github.com/llvm/llvm-project/pull/173134