[llvm] [EarlyCSE] Rematerialize alignment assumption. (PR #109131)

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 16 10:15:12 PST 2025


https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/109131

>From 2268c8c7d5a6e1f31f272b83f666dcfa73da8dda Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 16 Jan 2025 17:24:35 +0000
Subject: [PATCH 1/7] [PhaseOrdering] Add test making sure alignment doesn't
 get dropped.

---
 .../infer-align-from-assumption.ll            | 82 +++++++++++++++++++
 1 file changed, 82 insertions(+)
 create mode 100644 llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll

diff --git a/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll b/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll
new file mode 100644
index 00000000000000..998f6fe5b7efce
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='default<O3>' -S %s | FileCheck %s
+
+target triple = "arm64-apple-macosx"
+
+declare void @llvm.assume(i1 noundef)
+
+define i32 @entry(ptr %0) {
+; CHECK-LABEL: define i32 @entry(
+; CHECK-SAME: ptr nocapture [[TMP0:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
+; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I_I]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
+; CHECK-NEXT:    store ptr [[TMP5]], ptr [[TMP0]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP5]], i64 4) ]
+; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I1:%.*]] = load i32, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I_I1]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
+; CHECK-NEXT:    store ptr [[TMP8]], ptr [[TMP0]], align 8
+; CHECK-NEXT:    ret i32 [[TMP6]]
+;
+  %2 = call i32 @fn1(ptr %0)
+  %3 = call i32 @fn1(ptr %0)
+  ret i32 %3
+}
+
+
+define i32 @fn1(ptr %0) {
+; CHECK-LABEL: define i32 @fn1(
+; CHECK-SAME: ptr nocapture [[TMP0:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
+; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
+; CHECK-NEXT:    store ptr [[TMP5]], ptr [[TMP0]], align 8
+; CHECK-NEXT:    ret i32 [[TMP3]]
+;
+  %2 = call i32 @fn2(ptr %0)
+  ret i32 %2
+}
+
+define i32 @fn2(ptr %0) {
+; CHECK-LABEL: define i32 @fn2(
+; CHECK-SAME: ptr nocapture [[TMP0:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
+; CHECK-NEXT:    [[DOT0_COPYLOAD_I:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
+; CHECK-NEXT:    store ptr [[TMP5]], ptr [[TMP0]], align 8
+; CHECK-NEXT:    ret i32 [[TMP3]]
+;
+  %2 = load ptr, ptr %0, align 8
+  %3 = call i32 @load_assume_aligned(ptr %2)
+  %4 = load ptr, ptr %0, align 8
+  %5 = getelementptr i8, ptr %4, i64 4
+  store ptr %5, ptr %0, align 8
+  ret i32 %3
+}
+
+define i32 @load_assume_aligned(ptr %0) {
+; CHECK-LABEL: define i32 @load_assume_aligned(
+; CHECK-SAME: ptr [[TMP0:%.*]]) local_unnamed_addr {
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0]], i64 4) ]
+; CHECK-NEXT:    [[DOT0_COPYLOAD:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD]])
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  call void @llvm.assume(i1 true) [ "align"(ptr %0, i64 4) ]
+  %.0.copyload = load i32, ptr %0, align 1
+  %2 = call i32 @swap(i32 %.0.copyload)
+  ret i32 %2
+}
+
+declare i32 @swap(i32)

>From a0f983976c1622e8343799f747b6ef933198037a Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 18 Sep 2024 13:38:52 +0100
Subject: [PATCH 2/7] [EarlyCSE] Rematerialize alignment assumption.

---
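A minimal IR sketch of the transform, with illustrative names modelled on the
materialize-align-assumptions.ll test below: when EarlyCSE replaces a later
load carrying !align metadata with an earlier, equivalent value that lacks
that information, the alignment fact would otherwise be lost, so an
assumption is rematerialized for the replacement value.

  ; before EarlyCSE
  %l.1 = load ptr, ptr %p, align 8
  call void @foo(ptr %l.1)
  %l.2 = load ptr, ptr %p, align 8, !align !0   ; !0 = !{i64 4}
  ret ptr %l.2

  ; after EarlyCSE: %l.2 is replaced by %l.1, the alignment fact is kept
  %l.1 = load ptr, ptr %p, align 8
  call void @foo(ptr %l.1)
  call void @llvm.assume(i1 true) [ "align"(ptr %l.1, i64 4) ]
  ret ptr %l.1
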
 llvm/lib/Transforms/Scalar/EarlyCSE.cpp             | 13 +++++++++++++
 .../EarlyCSE/materialize-align-assumptions.ll       |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 3a0ae6b01a1144..4f8a2e06e6b451 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -31,6 +31,7 @@
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
@@ -1599,6 +1600,18 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         if (InVal.IsLoad)
           if (auto *I = dyn_cast<Instruction>(Op))
             combineMetadataForCSE(I, &Inst, false);
+
+        // If the load has align metadata, preserve it via an alignment
+        // assumption. Note that this doesn't use salvageKnowledge, as we need
+        // to create the assumption for the value we replaced the load with.
+        if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
+          auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
+          if (Op->getPointerAlignment(SQ.DL).value() % A->getZExtValue() != 0) {
+            IRBuilder B(&Inst);
+            B.CreateAlignmentAssumption(SQ.DL, Op, A);
+          }
+        }
+
         if (!Inst.use_empty())
           Inst.replaceAllUsesWith(Op);
         salvageKnowledge(&Inst, &AC);
diff --git a/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll b/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
index ea63376957162b..628577b0975071 100644
--- a/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
+++ b/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
@@ -10,6 +10,7 @@ define ptr @align_replacement_does_not_have_align_metadata(ptr noalias %p) {
 ; CHECK-NEXT:    call void @foo(ptr [[L_1]])
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[L_1]], i64 4
 ; CHECK-NEXT:    store ptr [[GEP]], ptr [[P]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[GEP]], i64 4) ]
 ; CHECK-NEXT:    ret ptr [[GEP]]
 ;
   %l.1 = load ptr, ptr %p, align 8
@@ -27,6 +28,7 @@ define ptr @align_replacement_does_not_have_align_metadata2(ptr noalias %p) {
 ; CHECK-NEXT:    [[L_1:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[L_1]], i64 4
 ; CHECK-NEXT:    store ptr [[GEP]], ptr [[P]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[GEP]], i64 4) ]
 ; CHECK-NEXT:    ret ptr [[GEP]]
 ;
   %l.1 = load ptr, ptr %p, align 8
@@ -54,6 +56,7 @@ define ptr @align_replacement_has_smaller_alignment(ptr noalias %p) {
 ; CHECK-SAME: ptr noalias [[P:%.*]]) {
 ; CHECK-NEXT:    [[L_1:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]]
 ; CHECK-NEXT:    call void @foo(ptr [[L_1]])
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L_1]], i64 8) ]
 ; CHECK-NEXT:    ret ptr [[L_1]]
 ;
   %l.1 = load ptr, ptr %p, align 8, !align !0

>From 29d08d5835d84b69a3c4ff9755e91399aa0bd9f1 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 19 Sep 2024 21:33:40 +0100
Subject: [PATCH 3/7] !fixup check noundef

---
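Without !noundef, a violated !align only turns the loaded value into poison,
whereas a false "align" assumption is immediate UB, so emitting the assume
could introduce UB the original IR did not have. The rematerialization is
therefore now gated on the load also carrying !noundef metadata (align plus
noundef already makes a violation UB), i.e. only loads such as

  %l.2 = load ptr, ptr %p, align 8, !align !0, !noundef !{}

get an assumption materialized for their replacement.
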
 llvm/lib/Transforms/Scalar/EarlyCSE.cpp       | 20 ++++++-----
 .../EarlyCSE/materialize-align-assumptions.ll | 35 ++++++++++++++-----
 2 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 4f8a2e06e6b451..3745311e0ff307 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -1601,14 +1601,18 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
           if (auto *I = dyn_cast<Instruction>(Op))
             combineMetadataForCSE(I, &Inst, false);
 
-        // If the load has align metadata, preserve it via an alignment
-        // assumption. Note that this doesn't use salvageKnowledge, as we need
-        // to create the assumption for the value we replaced the load with.
-        if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
-          auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
-          if (Op->getPointerAlignment(SQ.DL).value() % A->getZExtValue() != 0) {
-            IRBuilder B(&Inst);
-            B.CreateAlignmentAssumption(SQ.DL, Op, A);
+        // If the load has align and noundef metadata, preserve it via an
+        // alignment assumption. Note that this doesn't use salvageKnowledge,
+        // as we need to create the assumption for the value we replaced the
+        // load with.
+        if (Inst.hasMetadata(LLVMContext::MD_noundef)) {
+          if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
+            auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
+            if (Op->getPointerAlignment(SQ.DL).value() % A->getZExtValue() !=
+                0) {
+              IRBuilder B(&Inst);
+              B.CreateAlignmentAssumption(SQ.DL, Op, A);
+            }
           }
         }
 
diff --git a/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll b/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
index 628577b0975071..837a73a00d6431 100644
--- a/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
+++ b/llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
@@ -3,6 +3,24 @@
 
 declare void @foo(ptr)
 
+define ptr @align_replacement_does_not_have_align_metadata_missing_noundef(ptr noalias %p) {
+; CHECK-LABEL: define ptr @align_replacement_does_not_have_align_metadata_missing_noundef(
+; CHECK-SAME: ptr noalias [[P:%.*]]) {
+; CHECK-NEXT:    [[L_1:%.*]] = load ptr, ptr [[P]], align 8
+; CHECK-NEXT:    call void @foo(ptr [[L_1]])
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[L_1]], i64 4
+; CHECK-NEXT:    store ptr [[GEP]], ptr [[P]], align 8
+; CHECK-NEXT:    ret ptr [[GEP]]
+;
+  %l.1 = load ptr, ptr %p, align 8
+  call void @foo(ptr %l.1)
+  %l.2 = load ptr, ptr %p, align 8
+  %gep = getelementptr i8, ptr %l.2, i64 4
+  store ptr %gep, ptr %p, align 8
+  %l.3 = load ptr, ptr %p, align 8, !align !0
+  ret ptr %l.3
+}
+
 define ptr @align_replacement_does_not_have_align_metadata(ptr noalias %p) {
 ; CHECK-LABEL: define ptr @align_replacement_does_not_have_align_metadata(
 ; CHECK-SAME: ptr noalias [[P:%.*]]) {
@@ -18,7 +36,7 @@ define ptr @align_replacement_does_not_have_align_metadata(ptr noalias %p) {
   %l.2 = load ptr, ptr %p, align 8
   %gep = getelementptr i8, ptr %l.2, i64 4
   store ptr %gep, ptr %p, align 8
-  %l.3 = load ptr, ptr %p, align 8, !align !0
+  %l.3 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
   ret ptr %l.3
 }
 
@@ -34,7 +52,7 @@ define ptr @align_replacement_does_not_have_align_metadata2(ptr noalias %p) {
   %l.1 = load ptr, ptr %p, align 8
   %gep = getelementptr i8, ptr %l.1, i64 4
   store ptr %gep, ptr %p, align 8
-  %l.2 = load ptr, ptr %p, align 8, !align !0
+  %l.2 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
   ret ptr %l.2
 }
 
@@ -61,7 +79,7 @@ define ptr @align_replacement_has_smaller_alignment(ptr noalias %p) {
 ;
   %l.1 = load ptr, ptr %p, align 8, !align !0
   call void @foo(ptr %l.1)
-  %l.2 = load ptr, ptr %p, align 8, !align !1
+  %l.2 = load ptr, ptr %p, align 8, !align !1, !noundef !{}
   ret ptr %l.2
 }
 
@@ -70,12 +88,12 @@ define ptr @align_replacement_has_larger_alignment(ptr %p) {
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[L_1:%.*]] = load ptr, ptr [[P]], align 8, !align [[META1:![0-9]+]]
 ; CHECK-NEXT:    call void @foo(ptr [[L_1]])
-; CHECK-NEXT:    [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]]
+; CHECK-NEXT:    [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]], !noundef [[META2:![0-9]+]]
 ; CHECK-NEXT:    ret ptr [[L_2]]
 ;
   %l.1 = load ptr, ptr %p, align 8, !align !1
   call void @foo(ptr %l.1)
-  %l.2 = load ptr, ptr %p, align 8, !align !0
+  %l.2 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
   ret ptr %l.2
 }
 
@@ -84,12 +102,12 @@ define ptr @align_1(ptr %p) {
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[L_1:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    call void @foo(ptr [[L_1]])
-; CHECK-NEXT:    [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META2:![0-9]+]]
+; CHECK-NEXT:    [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META3:![0-9]+]], !noundef [[META2]]
 ; CHECK-NEXT:    ret ptr [[L_2]]
 ;
   %l.1 = load ptr, ptr %p, align 8
   call void @foo(ptr %l.1)
-  %l.2 = load ptr, ptr %p, align 8, !align !2
+  %l.2 = load ptr, ptr %p, align 8, !align !2, !noundef !{}
   ret ptr %l.2
 }
 
@@ -99,5 +117,6 @@ define ptr @align_1(ptr %p) {
 ;.
 ; CHECK: [[META0]] = !{i64 4}
 ; CHECK: [[META1]] = !{i64 8}
-; CHECK: [[META2]] = !{i64 1}
+; CHECK: [[META2]] = !{}
+; CHECK: [[META3]] = !{i64 1}
 ;.

>From 8d9091b308c075c24172135f464e2a8b323d82a8 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 13 Jan 2025 21:31:42 +0000
Subject: [PATCH 4/7] Use computeKnownBits.

---
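Instead of getPointerAlignment, the provable alignment of the replacement is
now taken from its known bits: a value with N known trailing zero bits is at
least 2^N-byte aligned, and the assumption is only rematerialized when that
falls short of the !align requirement. A rough sketch of the check (mirroring
the hunk below, with 64-bit arithmetic for the shift):

  KnownBits KB = computeKnownBits(Op, SQ.DL);
  uint64_t AlignFromKB = uint64_t(1) << KB.countMinTrailingZeros();
  if (AlignFromKB < A->getZExtValue()) {
    IRBuilder<> Builder(&Inst);
    Builder.CreateAlignmentAssumption(SQ.DL, Op, A);
  }

For example, a pointer whose low three bits are known to be zero is already
8-byte aligned, so an !align requirement of 4 does not need an assume.
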
 llvm/lib/Transforms/Scalar/EarlyCSE.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 3745311e0ff307..378c16f126f4de 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -16,6 +16,7 @@
 #include "llvm/ADT/Hashing.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/ScopedHashTable.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AssumptionCache.h"
@@ -1607,9 +1608,11 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         // load with.
         if (Inst.hasMetadata(LLVMContext::MD_noundef)) {
           if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
+            Inst.setMetadata(LLVMContext::MD_align, nullptr);
             auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
-            if (Op->getPointerAlignment(SQ.DL).value() % A->getZExtValue() !=
-                0) {
+            auto KB = computeKnownBits(Op, SQ.DL);
+            unsigned AlignFromKB = 1 << KB.countMinTrailingZeros();
+            if (AlignFromKB < A->getZExtValue()) {
               IRBuilder B(&Inst);
               B.CreateAlignmentAssumption(SQ.DL, Op, A);
             }

>From 4f216395790a646cc45a6544a0fcc7635729a9ae Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 13 Jan 2025 21:32:09 +0000
Subject: [PATCH 5/7] Remove align assumptions if possible.

---
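This intermediate step sketches, on the InstCombine side, a walk over the
transitive users of an "align" assumption's pointer to decide whether the
alignment fact is still needed (tracked in AlignNeeded); the idea is that a
bundle such as

  call void @llvm.assume(i1 true) [ "align"(ptr %ptr, i64 8) ]

can be dropped when no user profits from it. The heuristic is reworked and
moved into EarlyCSE in the next patch.
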
 .../InstCombine/InstCombineCalls.cpp          | 36 +++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 67b7ec3ae3c9ec..b47450571a9f91 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3226,6 +3226,42 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
         MaybeSimplifyHint(OBU.Inputs[0]);
         MaybeSimplifyHint(OBU.Inputs[1]);
       }
+
+      if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {
+        RetainedKnowledge RK = getKnowledgeFromBundle(
+            *cast<AssumeInst>(II), II->bundle_op_info_begin()[Idx]);
+        if (!RK || RK.AttrKind != Attribute::Alignment ||
+            !isPowerOf2_64(RK.ArgValue))
+          continue;
+        SetVector<const Instruction *> WorkList;
+        bool AlignNeeded = false;
+        WorkList.insert(II);
+        for (unsigned I = 0; I != WorkList.size(); ++I) {
+          if (auto *LI = dyn_cast<LoadInst>(WorkList[I])) {
+            if (auto *AlignMD = LI->getMetadata(LLVMContext::MD_align)) {
+              auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
+
+              if (A->getZExtValue() % RK.ArgValue != 0) {
+                AlignNeeded = true;
+                break;
+              }
+            }
+          }
+          if (isa<ICmpInst>(WorkList[I])) {
+            AlignNeeded = true;
+            break;
+          }
+          if (WorkList.size() > 16) {
+            AlignNeeded = true;
+            break;
+          }
+
+          for (const User *U : WorkList[I]->users())
+            WorkList.insert(cast<Instruction>(U));
+        }
+        auto *New = CallBase::removeOperandBundle(II, OBU.getTagID());
+        return New;
+      }
     }
 
     // Convert nonnull assume like:

>From 8441043ceceec1f08c9368312ed5dbba1b8bdc9f Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 14 Jan 2025 14:08:44 +0000
Subject: [PATCH 6/7] step

---
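This step moves the profitability walk into EarlyCSE: the assumption is only
rematerialized when some transitive user of the replacement could still
profit from the alignment fact, e.g. an under-aligned load or store through
the pointer, a return, or a comparison where neither operand is a constant;
programUndefinedIfPoison is also accepted as an alternative to !noundef. For
instance, a user like the following (illustrative name %replacement)

  %v = load i32, ptr %replacement, align 1

keeps the assumption, since its alignment could later be raised to the
assumed value.
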
 .../InstCombine/InstCombineCalls.cpp          | 36 ------------
 llvm/lib/Transforms/Scalar/EarlyCSE.cpp       | 55 +++++++++++++++++--
 2 files changed, 49 insertions(+), 42 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index b47450571a9f91..67b7ec3ae3c9ec 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3226,42 +3226,6 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
         MaybeSimplifyHint(OBU.Inputs[0]);
         MaybeSimplifyHint(OBU.Inputs[1]);
       }
-
-      if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {
-        RetainedKnowledge RK = getKnowledgeFromBundle(
-            *cast<AssumeInst>(II), II->bundle_op_info_begin()[Idx]);
-        if (!RK || RK.AttrKind != Attribute::Alignment ||
-            !isPowerOf2_64(RK.ArgValue))
-          continue;
-        SetVector<const Instruction *> WorkList;
-        bool AlignNeeded = false;
-        WorkList.insert(II);
-        for (unsigned I = 0; I != WorkList.size(); ++I) {
-          if (auto *LI = dyn_cast<LoadInst>(WorkList[I])) {
-            if (auto *AlignMD = LI->getMetadata(LLVMContext::MD_align)) {
-              auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
-
-              if (A->getZExtValue() % RK.ArgValue != 0) {
-                AlignNeeded = true;
-                break;
-              }
-            }
-          }
-          if (isa<ICmpInst>(WorkList[I])) {
-            AlignNeeded = true;
-            break;
-          }
-          if (WorkList.size() > 16) {
-            AlignNeeded = true;
-            break;
-          }
-
-          for (const User *U : WorkList[I]->users())
-            WorkList.insert(cast<Instruction>(U));
-        }
-        auto *New = CallBase::removeOperandBundle(II, OBU.getTagID());
-        return New;
-      }
     }
 
     // Convert nonnull assume like:
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 378c16f126f4de..aeb98d3204caf8 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -1606,15 +1606,58 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         // alignment assumption. Note that this doesn't use salvageKnowledge,
         // as we need to create the assumption for the value we replaced the
         // load with.
-        if (Inst.hasMetadata(LLVMContext::MD_noundef)) {
-          if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
+        if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
+          if (Inst.hasMetadata(LLVMContext::MD_noundef) ||
+              programUndefinedIfPoison(&Inst)) {
             Inst.setMetadata(LLVMContext::MD_align, nullptr);
-            auto *A = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
+            auto *B = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
             auto KB = computeKnownBits(Op, SQ.DL);
             unsigned AlignFromKB = 1 << KB.countMinTrailingZeros();
-            if (AlignFromKB < A->getZExtValue()) {
-              IRBuilder B(&Inst);
-              B.CreateAlignmentAssumption(SQ.DL, Op, A);
+            if (AlignFromKB < B->getZExtValue()) {
+              SetVector<const Value *> WorkList;
+              bool AlignNeeded = false;
+              for (const User *U : Inst.users())
+                if (auto *I = dyn_cast<Instruction>(U))
+                  WorkList.insert(I);
+
+              for (unsigned I = 0; I != WorkList.size(); ++I) {
+                auto *Curr = WorkList[I];
+                if (auto *LI = dyn_cast<LoadInst>(Curr)) {
+                  if (LI->getAlign().value() < B->getZExtValue()) {
+                    AlignNeeded = true;
+                    break;
+                  }
+                  continue;
+                }
+                if (auto *SI = dyn_cast<StoreInst>(Curr)) {
+                  if (SI->getAlign().value() < B->getZExtValue()) {
+                    AlignNeeded = true;
+                    break;
+                  }
+                  continue;
+                }
+                if (isa<ReturnInst>(Curr)) {
+                  AlignNeeded = true;
+                  break;
+                }
+                if (isa<ICmpInst>(Curr) &&
+                    !isa<Constant>(cast<Instruction>(Curr)->getOperand(0)) &&
+                    !isa<Constant>(cast<Instruction>(Curr)->getOperand(1))) {
+                  AlignNeeded = true;
+                  break;
+                }
+                if (WorkList.size() > 16) {
+                  AlignNeeded = true;
+                  break;
+                }
+
+                for (const User *U : Curr->users())
+                  WorkList.insert(cast<Instruction>(U));
+              }
+              if (AlignNeeded) {
+                IRBuilder Builder(&Inst);
+                Builder.CreateAlignmentAssumption(SQ.DL, Op, B);
+              }
             }
           }
         }

>From 1e2b44f681b7e6220ebaa6cfe40dece6161406ac Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 15 Jan 2025 19:16:26 +0000
Subject: [PATCH 7/7] [InstCombine] Fold align assume into load's !align
 metadata if possible

https://github.com/llvm/llvm-project/pull/108958/
---
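A minimal before/after sketch, based on the assume-align.ll changes below:
when the pointer operand of an "align" assumption is a load and the
assumption is valid at the load's context, the fact is folded into !align
metadata on the load and the operand bundle is removed (the now-empty assume
is cleaned up separately).

  ; before
  %p2 = load ptr, ptr %p, align 8
  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 8) ]
  ret ptr %p2

  ; after InstCombine
  %p2 = load ptr, ptr %p, align 8, !align !0    ; !0 = !{i64 8}
  ret ptr %p2
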
 .../InstCombine/InstCombineCalls.cpp          | 30 ++++++++++++++++---
 .../Transforms/InstCombine/assume-align.ll    | 30 +++++++++++++++++--
 .../infer-align-from-assumption.ll            | 14 ++++-----
 .../inlining-alignment-assumptions.ll         |  3 +-
 4 files changed, 60 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 67b7ec3ae3c9ec..bded45cf6b02a7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3207,12 +3207,13 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       // TODO: apply range metadata for range check patterns?
     }
 
-    // Separate storage assumptions apply to the underlying allocations, not any
-    // particular pointer within them. When evaluating the hints for AA purposes
-    // we getUnderlyingObject them; by precomputing the answers here we can
-    // avoid having to do so repeatedly there.
     for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
       OperandBundleUse OBU = II->getOperandBundleAt(Idx);
+
+      // Separate storage assumptions apply to the underlying allocations, not
+      // any particular pointer within them. When evaluating the hints for AA
+      // purposes we getUnderlyingObject them; by precomputing the answers here
+      // we can avoid having to do so repeatedly there.
       if (OBU.getTagName() == "separate_storage") {
         assert(OBU.Inputs.size() == 2);
         auto MaybeSimplifyHint = [&](const Use &U) {
@@ -3226,6 +3227,27 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
         MaybeSimplifyHint(OBU.Inputs[0]);
         MaybeSimplifyHint(OBU.Inputs[1]);
       }
+      // Try to fold alignment assumption into a load's !align metadata, if the
+      // assumption is valid in the load's context.
+      if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {
+        RetainedKnowledge RK = getKnowledgeFromBundle(
+            *cast<AssumeInst>(II), II->bundle_op_info_begin()[Idx]);
+        if (!RK || RK.AttrKind != Attribute::Alignment ||
+            !isPowerOf2_64(RK.ArgValue))
+          continue;
+
+        auto *LI = dyn_cast<LoadInst>(OBU.Inputs[0]);
+        if (!LI ||
+            !isValidAssumeForContext(II, LI, &DT, /*AllowEphemerals=*/true))
+          continue;
+
+        LI->setMetadata(
+            LLVMContext::MD_align,
+            MDNode::get(II->getContext(), ValueAsMetadata::getConstant(
+                                              Builder.getInt64(RK.ArgValue))));
+        auto *New = CallBase::removeOperandBundle(II, OBU.getTagID());
+        return New;
+      }
     }
 
     // Convert nonnull assume like:
diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll
index 47659ff8c84909..549821802fe674 100644
--- a/llvm/test/Transforms/InstCombine/assume-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-align.ll
@@ -123,11 +123,9 @@ define i8 @assume_align_non_pow2(ptr %p) {
   ret i8 %v
 }
 
-; TODO: Can fold alignment assumption into !align metadata on load.
 define ptr @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(ptr %p) {
 ; CHECK-LABEL: @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 8) ]
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0:![0-9]+]]
 ; CHECK-NEXT:    ret ptr [[P2]]
 ;
   %p2 = load ptr, ptr %p
@@ -135,6 +133,16 @@ define ptr @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(ptr %p)
   ret ptr %p2
 }
 
+define ptr @fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata(ptr %p) {
+; CHECK-LABEL: @fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata(
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0]]
+; CHECK-NEXT:    ret ptr [[P2]]
+;
+  %p2 = load ptr, ptr %p
+  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i32 8) ]
+  ret ptr %p2
+}
+
 define ptr @dont_fold_assume_align_pow2_of_loaded_pointer_into_align_metadata_due_to_call(ptr %p) {
 ; CHECK-LABEL: @dont_fold_assume_align_pow2_of_loaded_pointer_into_align_metadata_due_to_call(
 ; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
@@ -171,3 +179,19 @@ define ptr @dont_fold_assume_align_zero_of_loaded_pointer_into_align_metadata(pt
   call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 0) ]
   ret ptr %p2
 }
+
+; !align must have a constant integer alignment.
+define ptr @dont_fold_assume_align_not_constant_of_loaded_pointer_into_align_metadata(ptr %p, i64 %align) {
+; CHECK-LABEL: @dont_fold_assume_align_not_constant_of_loaded_pointer_into_align_metadata(
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META1:![0-9]+]]
+; CHECK-NEXT:    ret ptr [[P2]]
+;
+  %p2 = load ptr, ptr %p
+  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 %align) ]
+  ret ptr %p2
+}
+
+;.
+; CHECK: [[META0]] = !{i64 8}
+; CHECK: [[META1]] = !{i64 1}
+;.
diff --git a/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll b/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll
index 998f6fe5b7efce..91372795531dc8 100644
--- a/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll
+++ b/llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll
@@ -8,14 +8,12 @@ declare void @llvm.assume(i1 noundef)
 define i32 @entry(ptr %0) {
 ; CHECK-LABEL: define i32 @entry(
 ; CHECK-SAME: ptr nocapture [[TMP0:%.*]]) local_unnamed_addr {
-; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8, !align [[META0:![0-9]+]]
 ; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I:%.*]] = load i32, ptr [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I_I]])
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
 ; CHECK-NEXT:    store ptr [[TMP5]], ptr [[TMP0]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP5]], i64 4) ]
 ; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I1:%.*]] = load i32, ptr [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I_I1]])
 ; CHECK-NEXT:    [[TMP7:%.*]] = load ptr, ptr [[TMP0]], align 8
@@ -32,8 +30,7 @@ define i32 @entry(ptr %0) {
 define i32 @fn1(ptr %0) {
 ; CHECK-LABEL: define i32 @fn1(
 ; CHECK-SAME: ptr nocapture [[TMP0:%.*]]) local_unnamed_addr {
-; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8, !align [[META0]]
 ; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I:%.*]] = load i32, ptr [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I]])
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
@@ -48,8 +45,7 @@ define i32 @fn1(ptr %0) {
 define i32 @fn2(ptr %0) {
 ; CHECK-LABEL: define i32 @fn2(
 ; CHECK-SAME: ptr nocapture [[TMP0:%.*]]) local_unnamed_addr {
-; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
+; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8, !align [[META0]]
 ; CHECK-NEXT:    [[DOT0_COPYLOAD_I:%.*]] = load i32, ptr [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I]])
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
@@ -68,7 +64,6 @@ define i32 @fn2(ptr %0) {
 define i32 @load_assume_aligned(ptr %0) {
 ; CHECK-LABEL: define i32 @load_assume_aligned(
 ; CHECK-SAME: ptr [[TMP0:%.*]]) local_unnamed_addr {
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0]], i64 4) ]
 ; CHECK-NEXT:    [[DOT0_COPYLOAD:%.*]] = load i32, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD]])
 ; CHECK-NEXT:    ret i32 [[TMP2]]
@@ -80,3 +75,6 @@ define i32 @load_assume_aligned(ptr %0) {
 }
 
 declare i32 @swap(i32)
+;.
+; CHECK: [[META0]] = !{i64 4}
+;.
diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
index b1cee80bde33fd..d57af87d164474 100644
--- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
@@ -35,8 +35,7 @@ define void @caller1(i1 %c, ptr align 1 %ptr) {
 ; ASSUMPTIONS-ON-NEXT:    br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]]
 ; ASSUMPTIONS-ON:       common.ret:
 ; ASSUMPTIONS-ON-NEXT:    [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ]
-; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[PTR:%.*]], i64 8) ]
-; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, ptr [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, ptr [[PTR:%.*]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8


