[llvm] [ValueTracking] Enhance alignment propagation in computeKnownBits. (PR #166935)

Shamshura Egor via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 1 06:17:52 PST 2025


https://github.com/egorshamshura updated https://github.com/llvm/llvm-project/pull/166935

>From 7d53230d6985bbdb4fd65ef820895c58b2218c50 Mon Sep 17 00:00:00 2001
From: Shamshura Egor <shamshuraegor at gmail.com>
Date: Mon, 1 Dec 2025 14:00:13 +0000
Subject: [PATCH 1/2] Added tests.

---
 .../Transforms/InferAlignment/ptrtoint.ll     | 64 +++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 llvm/test/Transforms/InferAlignment/ptrtoint.ll

diff --git a/llvm/test/Transforms/InferAlignment/ptrtoint.ll b/llvm/test/Transforms/InferAlignment/ptrtoint.ll
new file mode 100644
index 0000000000000..8c6b755282924
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/ptrtoint.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
+
+define i64 @base(ptr %0) {
+; CHECK-LABEL: define i64 @base(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V3]], 2
+; CHECK-NEXT:    ret i64 [[V5]]
+;
+  %v1 = load i32, ptr %0, align 4
+  %v3 = ptrtoint ptr %0 to i64
+  %v5 = and i64 %v3, 2
+  ret i64 %v5
+}
+
+define i64 @best_alignment(ptr %0) {
+; CHECK-LABEL: define i64 @best_alignment(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[V1_3:%.*]] = load i32, ptr [[TMP0]], align 16
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V3]], 15
+; CHECK-NEXT:    ret i64 [[V5]]
+;
+  %v1 = load i32, ptr %0, align 8
+  %v2 = load i32, ptr %0, align 16
+  %v3 = ptrtoint ptr %0 to i64
+  %v5 = and i64 %v3, 15
+  ret i64 %v5
+}
+
+declare void @func()
+
+define i64 @negative_test(ptr %0) {
+; CHECK-LABEL: define i64 @negative_test(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V3]], 2
+; CHECK-NEXT:    call void @func()
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    ret i64 [[V5]]
+;
+  %v3 = ptrtoint ptr %0 to i64
+  %v5 = and i64 %v3, 2
+  call void @func()
+  %v1 = load i32, ptr %0, align 4
+  ret i64 %v5
+}
+
+define i64 @ptrtoaddr(ptr %0) {
+; CHECK-LABEL: define i64 @ptrtoaddr(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoaddr ptr [[TMP0]] to i64
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[V5:%.*]] = and i64 2, [[V3]]
+; CHECK-NEXT:    ret i64 [[V5]]
+;
+  %v3 = ptrtoaddr ptr %0 to i64
+  %v1 = load i32, ptr %0, align 4
+  %v5 = and i64 2, %v3
+  ret i64 %v5
+}

>From ddb38825171ca2a9da4432945deaeb1d034717a2 Mon Sep 17 00:00:00 2001
From: Shamshura Egor <shamshuraegor at gmail.com>
Date: Mon, 1 Dec 2025 14:17:36 +0000
Subject: [PATCH 2/2] Added opt.

---
 llvm/lib/Transforms/Scalar/InferAlignment.cpp | 43 +++++++++++++++++--
 .../Transforms/InferAlignment/ptrtoint.ll     |  6 +--
 2 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/InferAlignment.cpp b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
index 39751c04eba08..00392103b7987 100644
--- a/llvm/lib/Transforms/Scalar/InferAlignment.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
@@ -12,19 +12,26 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Transforms/Scalar/InferAlignment.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Alignment.h"
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Local.h"
 
 using namespace llvm;
+using namespace llvm::PatternMatch;
 
 static bool tryToImproveAlign(
     const DataLayout &DL, Instruction *I,
-    function_ref<Align(Value *PtrOp, Align OldAlign, Align PrefAlign)> Fn) {
+    function_ref<Align(Value *PtrOp, Align OldAlign, Align PrefAlign)> Fn,
+    function_ref<Align(Instruction &I, Value *PtrOp)> ActualAlignFn) {
 
   if (auto *PtrOp = getLoadStorePointerOperand(I)) {
     Align OldAlign = getLoadStoreAlignment(I);
@@ -37,6 +44,17 @@ static bool tryToImproveAlign(
     }
   }
 
+  Value *PtrOp;
+  ConstantInt *Const;
+  if (match(I,
+            m_c_And(m_PtrToIntOrAddr(m_Value(PtrOp)), m_ConstantInt(Const)))) {
+    Align ActualAlign = ActualAlignFn(*I, PtrOp);
+    if (Const->getValue().ult(ActualAlign.value())) {
+      I->replaceAllUsesWith(Constant::getNullValue(I->getType()));
+      return true;
+    }
+  }
+
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
   if (!II)
     return false;
@@ -75,11 +93,15 @@ bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
       Changed |= tryToImproveAlign(
-          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
+          DL, &I,
+          [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
             if (PrefAlign > OldAlign)
               return std::max(OldAlign,
                               tryEnforceAlignment(PtrOp, PrefAlign, DL));
             return OldAlign;
+          },
+          [&](Instruction &, Value *PtrOp) {
+            return PtrOp->getPointerAlignment(DL);
           });
     }
   }
@@ -120,6 +142,17 @@ bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
     return LoadStoreAlign;
   };
 
+  auto ActualAlignFn = [&](Instruction &I, Value *PtrOp) {
+    Align KnownAlign = InferFromKnownBits(I, PtrOp);
+    Align BaseAlign = PtrOp->getPointerAlignment(DL);
+    if (auto It = BestBasePointerAligns.find(PtrOp);
+        It != BestBasePointerAligns.end()) {
+      BaseAlign = std::max(BaseAlign, It->second);
+    }
+    Align ActualAlign = std::max(KnownAlign, BaseAlign);
+    return ActualAlign;
+  };
+
   for (BasicBlock &BB : F) {
     // We need to reset the map for each block because alignment information
     // can only be propagated from instruction A to B if A dominates B.
@@ -131,10 +164,12 @@ bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
 
     for (Instruction &I : BB) {
       Changed |= tryToImproveAlign(
-          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
+          DL, &I,
+          [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
             return std::max(InferFromKnownBits(I, PtrOp),
                             InferFromBasePointer(PtrOp, OldAlign));
-          });
+          },
+          ActualAlignFn);
     }
   }
 
diff --git a/llvm/test/Transforms/InferAlignment/ptrtoint.ll b/llvm/test/Transforms/InferAlignment/ptrtoint.ll
index 8c6b755282924..ccc6c426ca2dd 100644
--- a/llvm/test/Transforms/InferAlignment/ptrtoint.ll
+++ b/llvm/test/Transforms/InferAlignment/ptrtoint.ll
@@ -7,7 +7,7 @@ define i64 @base(ptr %0) {
 ; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[TMP0]] to i64
 ; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V3]], 2
-; CHECK-NEXT:    ret i64 [[V5]]
+; CHECK-NEXT:    ret i64 0
 ;
   %v1 = load i32, ptr %0, align 4
   %v3 = ptrtoint ptr %0 to i64
@@ -22,7 +22,7 @@ define i64 @best_alignment(ptr %0) {
 ; CHECK-NEXT:    [[V1_3:%.*]] = load i32, ptr [[TMP0]], align 16
 ; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[TMP0]] to i64
 ; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V3]], 15
-; CHECK-NEXT:    ret i64 [[V5]]
+; CHECK-NEXT:    ret i64 0
 ;
   %v1 = load i32, ptr %0, align 8
   %v2 = load i32, ptr %0, align 16
@@ -55,7 +55,7 @@ define i64 @ptrtoaddr(ptr %0) {
 ; CHECK-NEXT:    [[V3:%.*]] = ptrtoaddr ptr [[TMP0]] to i64
 ; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    [[V5:%.*]] = and i64 2, [[V3]]
-; CHECK-NEXT:    ret i64 [[V5]]
+; CHECK-NEXT:    ret i64 0
 ;
   %v3 = ptrtoaddr ptr %0 to i64
   %v1 = load i32, ptr %0, align 4


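In case a summary helps review (this note is commentary, not part of the patch): the change teaches InferAlignment to fold uses of and(ptrtoint/ptrtoaddr %p, C) to zero whenever the mask C is strictly smaller than the pointer's proven alignment, since every set bit of C then lies in the low address bits that are known to be zero. A minimal sketch of the intended transform, based on the @base test above, with the pointer renamed to %p for readability:

  ; before -passes=infer-alignment: the load establishes align 4 on %p
  %v1 = load i32, ptr %p, align 4
  %v3 = ptrtoint ptr %p to i64
  %v5 = and i64 %v3, 2          ; mask 2 < align 4, so the result is 0
  ret i64 %v5

  ; after: uses of the and are replaced with the constant 0 (the dead
  ; and itself is left behind for later DCE)
  ret i64 0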
