[llvm] [InstCombine] Fold align assume into load's !align metadata if possible. (PR #123247)

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 7 07:37:49 PDT 2025


https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/123247

From 789dc63f6a72f7a1c5e098dbc558d2ded05abc4e Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 7 Oct 2025 14:38:07 +0100
Subject: [PATCH 1/3] [InstCombine] Add test for removing redundant assume if
 align matches.

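The new test covers an alignment assumption on a global whose declared
alignment already matches the assumed value: @g2 is declared with align 8,
so the assumption is provable from known bits and the "align" bundle (and
with it the now-trivial assume) is expected to be removed. The input boils
down to

  call void @llvm.assume(i1 true) [ "align"(ptr @g2, i64 8) ]

which the CHECK lines expect to disappear entirely.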
---
 llvm/test/Transforms/InstCombine/assume-align.ll | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll
index 4185b10eeca95..e0a6abc5d29a1 100644
--- a/llvm/test/Transforms/InstCombine/assume-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-align.ll
@@ -247,6 +247,16 @@ define ptr @redundant_assume_align_8_via_asume(ptr %p) {
   ret ptr %p
 }
 
+@g2 = external constant i128, align 8
+
+define void @redundant_assume_align_global_eq() {
+; CHECK-LABEL: @redundant_assume_align_global_eq(
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.assume(i1 true) [ "align"(ptr @g2, i64 8) ]
+  ret void
+}
+
 define ptr @assume_align_1(ptr %p) {
 ; CHECK-LABEL: @assume_align_1(
 ; CHECK-NEXT:    call void @foo(ptr [[P:%.*]])

From c64ad8f9bcb82c90dbec2a8724f6491fa89ca1d0 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 2 Oct 2025 08:54:01 +0100
Subject: [PATCH 2/3] [InstCombine] Fold align assume into load's !align
 metadata if possible.

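When an "align" assumption cannot already be discharged via known bits, but
its argument is a load for which the assume is a valid context, fold the
assumed alignment into !align metadata on the load and drop the operand
bundle. As a sketch (mirroring the updated
fold_assume_align_pow2_of_loaded_pointer_into_align_metadata test below),

  %p2 = load ptr, ptr %p
  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 8) ]
  ret ptr %p2

becomes

  %p2 = load ptr, ptr %p, align 8, !align !0
  ret ptr %p2

  !0 = !{i64 8}

The metadata operand is always emitted as an i64, so an i32 bundle argument
(as in the new fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata
test) folds to the same metadata node.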
---
 .../InstCombine/InstCombineCalls.cpp          | 15 +++++++++++---
 .../Transforms/InstCombine/assume-align.ll    | 20 +++++++++++++------
 .../AArch64/infer-align-from-assumption.ll    | 19 +++++++++---------
 3 files changed, 35 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e1e24a99d0474..75ef4bead046c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3446,9 +3446,18 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
         KnownBits Known = computeKnownBits(RK.WasOn, /*CtxI=*/nullptr);
         unsigned TZ = std::min(Known.countMinTrailingZeros(),
                                Value::MaxAlignmentExponent);
-        if ((1ULL << TZ) < RK.ArgValue)
-          continue;
-        return CallBase::removeOperandBundle(II, OBU.getTagID());
+        if ((1ULL << TZ) >= RK.ArgValue)
+          return CallBase::removeOperandBundle(II, OBU.getTagID());
+
+        auto *LI = dyn_cast<LoadInst>(OBU.Inputs[0]);
+        if (LI &&
+            isValidAssumeForContext(II, LI, &DT, /*AllowEphemerals=*/true)) {
+          LI->setMetadata(LLVMContext::MD_align,
+                          MDNode::get(II->getContext(),
+                                      ValueAsMetadata::getConstant(
+                                          Builder.getInt64(RK.ArgValue))));
+          return CallBase::removeOperandBundle(II, OBU.getTagID());
+        }
       }
     }
 
diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll
index e0a6abc5d29a1..83821812067ab 100644
--- a/llvm/test/Transforms/InstCombine/assume-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-align.ll
@@ -123,11 +123,9 @@ define i8 @assume_align_non_pow2(ptr %p) {
   ret i8 %v
 }
 
-; TODO: Can fold alignment assumption into !align metadata on load.
 define ptr @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(ptr %p) {
 ; CHECK-LABEL: @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 8) ]
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0:![0-9]+]]
 ; CHECK-NEXT:    ret ptr [[P2]]
 ;
   %p2 = load ptr, ptr %p
@@ -135,6 +133,16 @@ define ptr @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(ptr %p)
   ret ptr %p2
 }
 
+define ptr @fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata(ptr %p) {
+; CHECK-LABEL: @fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata(
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0]]
+; CHECK-NEXT:    ret ptr [[P2]]
+;
+  %p2 = load ptr, ptr %p
+  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i32 8) ]
+  ret ptr %p2
+}
+
 define ptr @dont_fold_assume_align_pow2_of_loaded_pointer_into_align_metadata_due_to_call(ptr %p) {
 ; CHECK-LABEL: @dont_fold_assume_align_pow2_of_loaded_pointer_into_align_metadata_due_to_call(
 ; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
@@ -187,7 +195,7 @@ define ptr @redundant_assume_align_1(ptr %p) {
 
 define ptr @redundant_assume_align_8_via_align_metadata(ptr %p) {
 ; CHECK-LABEL: @redundant_assume_align_8_via_align_metadata(
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0:![0-9]+]]
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0]]
 ; CHECK-NEXT:    call void @foo(ptr [[P2]])
 ; CHECK-NEXT:    ret ptr [[P2]]
 ;
@@ -199,8 +207,7 @@ define ptr @redundant_assume_align_8_via_align_metadata(ptr %p) {
 
 define ptr @assume_align_16_via_align_metadata(ptr %p) {
 ; CHECK-LABEL: @assume_align_16_via_align_metadata(
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0]]
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i32 16) ]
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META1:![0-9]+]]
 ; CHECK-NEXT:    call void @foo(ptr [[P2]])
 ; CHECK-NEXT:    ret ptr [[P2]]
 ;
@@ -283,4 +290,5 @@ define ptr @assume_load_pointer_result(ptr %p, i64 %align) {
 
 ;.
 ; CHECK: [[META0]] = !{i64 8}
+; CHECK: [[META1]] = !{i64 16}
 ;.
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll
index 4196625e6bd21..d381b999c7dcc 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll
@@ -8,15 +8,13 @@ declare void @llvm.assume(i1 noundef)
 define i32 @earlycse_entry(ptr %p) {
 ; CHECK-LABEL: define i32 @earlycse_entry(
 ; CHECK-SAME: ptr captures(none) [[P:%.*]]) local_unnamed_addr {
-; CHECK-NEXT:    [[L_I:%.*]] = load ptr, ptr [[P]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L_I]], i64 4) ]
+; CHECK-NEXT:    [[L_I:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0:![0-9]+]]
 ; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I:%.*]] = load i32, ptr [[L_I]], align 4
 ; CHECK-NEXT:    [[R_I_I:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I_I]])
 ; CHECK-NEXT:    [[L_2_I:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[GEP_I:%.*]] = getelementptr i8, ptr [[L_2_I]], i64 4
 ; CHECK-NEXT:    store ptr [[GEP_I]], ptr [[P]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_I]], i64 4) ]
-; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I2:%.*]] = load i32, ptr [[GEP_I]], align 4
+; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I2:%.*]] = load i32, ptr [[GEP_I]], align 1
 ; CHECK-NEXT:    [[R_I_I3:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I_I2]])
 ; CHECK-NEXT:    [[L_2_I4:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[GEP_I5:%.*]] = getelementptr i8, ptr [[L_2_I4]], i64 4
@@ -31,8 +29,7 @@ define i32 @earlycse_entry(ptr %p) {
 define i32 @earlycse_fn1(ptr %p) {
 ; CHECK-LABEL: define i32 @earlycse_fn1(
 ; CHECK-SAME: ptr captures(none) [[P:%.*]]) local_unnamed_addr {
-; CHECK-NEXT:    [[L:%.*]] = load ptr, ptr [[P]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L]], i64 4) ]
+; CHECK-NEXT:    [[L:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]]
 ; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I:%.*]] = load i32, ptr [[L]], align 4
 ; CHECK-NEXT:    [[R_I:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I]])
 ; CHECK-NEXT:    [[L_2:%.*]] = load ptr, ptr [[P]], align 8
@@ -67,8 +64,7 @@ declare i32 @swap(i32)
 define void @sroa_align_entry(ptr %p) {
 ; CHECK-LABEL: define void @sroa_align_entry(
 ; CHECK-SAME: ptr readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P]], i64 8) ]
-; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I:%.*]] = load i64, ptr [[P]], align 8
+; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I:%.*]] = load i64, ptr [[P]], align 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[DOT0_COPYLOAD_I_I_I]] to ptr
 ; CHECK-NEXT:    store i32 0, ptr [[TMP2]], align 4
 ; CHECK-NEXT:    ret void
@@ -83,8 +79,7 @@ define void @sroa_align_entry(ptr %p) {
 define ptr @sroa_fn1(ptr %p) {
 ; CHECK-LABEL: define ptr @sroa_fn1(
 ; CHECK-SAME: ptr readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
-; CHECK-NEXT:    [[L:%.*]] = load ptr, ptr [[P]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L]], i64 8) ]
+; CHECK-NEXT:    [[L:%.*]] = load ptr, ptr [[P]], align 8, !align [[META1:![0-9]+]]
 ; CHECK-NEXT:    [[L_FN3_I_I:%.*]] = load i64, ptr [[L]], align 8
 ; CHECK-NEXT:    [[I_I:%.*]] = inttoptr i64 [[L_FN3_I_I]] to ptr
 ; CHECK-NEXT:    ret ptr [[I_I]]
@@ -118,3 +113,7 @@ define i64 @sroa_fn3(ptr %0) {
   %l.fn3 = load i64, ptr %0, align 1
   ret i64 %l.fn3
 }
+;.
+; CHECK: [[META0]] = !{i64 4}
+; CHECK: [[META1]] = !{i64 8}
+;.

From b0f7aaa2f577addfa013e6194e15bdc9369c3ae3 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 18 Sep 2024 13:38:52 +0100
Subject: [PATCH 3/3] [EarlyCSE] Rematerialize alignment assumption.

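When EarlyCSE replaces a load carrying !align metadata with an earlier
available value whose known pointer alignment does not satisfy it, the
alignment fact would otherwise be lost along with the removed load.
Rematerialize it as an alignment assumption at the point of the removed
instruction. A minimal sketch (the function name is made up for
illustration): given

  define ptr @cse_keeps_align(ptr %p) {
    %a = load ptr, ptr %p, align 8
    %b = load ptr, ptr %p, align 8, !align !0
    ret ptr %b
  }

  !0 = !{i64 4}

early-cse with this patch is expected to replace %b with %a and emit

  call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4) ]

in its place, so later passes can still infer the alignment of %a, as the
updated phase-ordering test shows.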
---
 llvm/lib/Transforms/Scalar/EarlyCSE.cpp                  | 9 +++++++++
 .../PhaseOrdering/AArch64/infer-align-from-assumption.ll | 3 ++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 0f8cc6ca6ed21..67f96d09a7c23 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -31,6 +31,7 @@
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
@@ -1600,6 +1601,14 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         if (InVal.IsLoad)
           if (auto *I = dyn_cast<Instruction>(Op))
             combineMetadataForCSE(I, &Inst, false);
+        if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
+          auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
+          if (Op->getPointerAlignment(SQ.DL).value() % A->getZExtValue() != 0) {
+            IRBuilder B(&Inst);
+            B.CreateAlignmentAssumption(SQ.DL, Op, A);
+          }
+        }
+
         if (!Inst.use_empty())
           Inst.replaceAllUsesWith(Op);
         salvageKnowledge(&Inst, &AC);
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll
index d381b999c7dcc..0ab19c0a797b8 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/infer-align-from-assumption.ll
@@ -14,7 +14,8 @@ define i32 @earlycse_entry(ptr %p) {
 ; CHECK-NEXT:    [[L_2_I:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[GEP_I:%.*]] = getelementptr i8, ptr [[L_2_I]], i64 4
 ; CHECK-NEXT:    store ptr [[GEP_I]], ptr [[P]], align 8
-; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I2:%.*]] = load i32, ptr [[GEP_I]], align 1
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_I]], i64 4) ]
+; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I2:%.*]] = load i32, ptr [[GEP_I]], align 4
 ; CHECK-NEXT:    [[R_I_I3:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I_I2]])
 ; CHECK-NEXT:    [[L_2_I4:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[GEP_I5:%.*]] = getelementptr i8, ptr [[L_2_I4]], i64 4


