[llvm] 0f152a5 - [InferAlignment] Implement InferAlignmentPass

Dhruv Chawla via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 19 23:42:36 PDT 2023


Author: Dhruv Chawla
Date: 2023-09-20T12:03:36+05:30
New Revision: 0f152a55d3e4e71f7c795bf555e40c8895b97077

URL: https://github.com/llvm/llvm-project/commit/0f152a55d3e4e71f7c795bf555e40c8895b97077
DIFF: https://github.com/llvm/llvm-project/commit/0f152a55d3e4e71f7c795bf555e40c8895b97077.diff

LOG: [InferAlignment] Implement InferAlignmentPass

This pass infers alignment for instructions separately from InstCombine,
avoiding the redundant inference work that is otherwise repeated on each
InstCombine run. It runs late in the pipeline, just before the back-end
passes where this information is most useful.

Differential Revision: https://reviews.llvm.org/D158529

Added: 
    llvm/include/llvm/Transforms/Scalar/InferAlignment.h
    llvm/lib/Transforms/Scalar/InferAlignment.cpp

Modified: 
    llvm/include/llvm/Transforms/Utils/Local.h
    llvm/lib/Passes/PassBuilder.cpp
    llvm/lib/Passes/PassBuilderPipelines.cpp
    llvm/lib/Passes/PassRegistry.def
    llvm/lib/Transforms/Scalar/CMakeLists.txt
    llvm/lib/Transforms/Utils/Local.cpp
    llvm/test/Transforms/InferAlignment/alloca.ll
    llvm/test/Transforms/InferAlignment/atomic.ll
    llvm/test/Transforms/InferAlignment/attributes.ll
    llvm/test/Transforms/InferAlignment/gep-2d.ll
    llvm/test/Transforms/InferAlignment/gep-array.ll
    llvm/test/Transforms/InferAlignment/irregular-size.ll
    llvm/test/Transforms/InferAlignment/propagate-assume.ll
    llvm/test/Transforms/InferAlignment/ptrmask.ll
    llvm/test/Transforms/InferAlignment/undef-and-null.ll
    llvm/test/Transforms/InferAlignment/vector.ll
    llvm/test/Transforms/InferAlignment/volatile.ll
    llvm/test/Transforms/InferAlignment/vscale.ll

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Transforms/Scalar/InferAlignment.h b/llvm/include/llvm/Transforms/Scalar/InferAlignment.h
new file mode 100644
index 000000000000000..f6fc5f3b8d9872f
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Scalar/InferAlignment.h
@@ -0,0 +1,27 @@
+//===- InferAlignment.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Infer alignment for loads, stores and other memory operations based on
+// trailing zero known bits information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INFERALIGNMENT_H
+#define LLVM_TRANSFORMS_SCALAR_INFERALIGNMENT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct InferAlignmentPass : public PassInfoMixin<InferAlignmentPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INFERALIGNMENT_H

diff  --git a/llvm/include/llvm/Transforms/Utils/Local.h b/llvm/include/llvm/Transforms/Utils/Local.h
index 81de70a2fb4cd45..c752bc7f4a31fdc 100644
--- a/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/llvm/include/llvm/Transforms/Utils/Local.h
@@ -213,6 +213,15 @@ AllocaInst *DemoteRegToStack(Instruction &X,
 /// deleted and it returns the pointer to the alloca inserted.
 AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
 
+/// If the specified pointer points to an object that we control, try to modify
+/// the object's alignment to PrefAlign. Returns a minimum known alignment of
+/// the value after the operation, which may be lower than PrefAlign.
+///
+/// Increasing value alignment isn't often possible though. If alignment is
+/// important, a more reliable approach is to simply align all global variables
+/// and allocation instructions to their preferred alignment from the beginning.
+Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL);
+
 /// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
 /// the owning object can be modified and has an alignment less than \p
 /// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment

diff  --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 5c7f26109930c9d..985ff88139323c6 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -171,6 +171,7 @@
 #include "llvm/Transforms/Scalar/IndVarSimplify.h"
 #include "llvm/Transforms/Scalar/InductiveRangeCheckElimination.h"
 #include "llvm/Transforms/Scalar/InferAddressSpaces.h"
+#include "llvm/Transforms/Scalar/InferAlignment.h"
 #include "llvm/Transforms/Scalar/InstSimplifyPass.h"
 #include "llvm/Transforms/Scalar/JumpThreading.h"
 #include "llvm/Transforms/Scalar/LICM.h"

diff  --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index b12b4ee3e0e59fd..529743cc8bd2e39 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -88,6 +88,7 @@
 #include "llvm/Transforms/Scalar/Float2Int.h"
 #include "llvm/Transforms/Scalar/GVN.h"
 #include "llvm/Transforms/Scalar/IndVarSimplify.h"
+#include "llvm/Transforms/Scalar/InferAlignment.h"
 #include "llvm/Transforms/Scalar/InstSimplifyPass.h"
 #include "llvm/Transforms/Scalar/JumpThreading.h"
 #include "llvm/Transforms/Scalar/LICM.h"
@@ -274,6 +275,11 @@ cl::opt<bool> EnableMemProfContextDisambiguation(
     "enable-memprof-context-disambiguation", cl::init(false), cl::Hidden,
     cl::ZeroOrMore, cl::desc("Enable MemProf context disambiguation"));
 
+cl::opt<bool> EnableInferAlignmentPass(
+    "enable-infer-alignment-pass", cl::init(false), cl::Hidden, cl::ZeroOrMore,
+    cl::desc("Enable the InferAlignment pass, disabling alignment inference in "
+             "InstCombine"));
+
 PipelineTuningOptions::PipelineTuningOptions() {
   LoopInterleaving = true;
   LoopVectorization = true;
@@ -1140,6 +1146,8 @@ void PassBuilder::addVectorPasses(OptimizationLevel Level,
   FPM.addPass(LoopVectorizePass(
       LoopVectorizeOptions(!PTO.LoopInterleaving, !PTO.LoopVectorization)));
 
+  if (EnableInferAlignmentPass)
+    FPM.addPass(InferAlignmentPass());
   if (IsFullLTO) {
     // The vectorizer may have significantly shortened a loop body; unroll
     // again. Unroll small loops to hide loop backedge latency and saturate any
@@ -1257,6 +1265,8 @@ void PassBuilder::addVectorPasses(OptimizationLevel Level,
     FPM.addPass(SROAPass(SROAOptions::PreserveCFG));
   }
 
+  if (EnableInferAlignmentPass)
+    FPM.addPass(InferAlignmentPass());
   FPM.addPass(InstCombinePass());
 
   // This is needed for two reasons:

diff  --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index b9aa015d02dd95d..df9f14920f29161 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -337,6 +337,7 @@ FUNCTION_PASS("gvn-hoist", GVNHoistPass())
 FUNCTION_PASS("gvn-sink", GVNSinkPass())
 FUNCTION_PASS("helloworld", HelloWorldPass())
 FUNCTION_PASS("infer-address-spaces", InferAddressSpacesPass())
+FUNCTION_PASS("infer-alignment", InferAlignmentPass())
 FUNCTION_PASS("instcount", InstCountPass())
 FUNCTION_PASS("instsimplify", InstSimplifyPass())
 FUNCTION_PASS("invalidate<all>", InvalidateAllAnalysesPass())

diff  --git a/llvm/lib/Transforms/Scalar/CMakeLists.txt b/llvm/lib/Transforms/Scalar/CMakeLists.txt
index eb008c15903a744..2dd27037a17de7f 100644
--- a/llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -22,6 +22,7 @@ add_llvm_component_library(LLVMScalarOpts
   InductiveRangeCheckElimination.cpp
   IndVarSimplify.cpp
   InferAddressSpaces.cpp
+  InferAlignment.cpp
   InstSimplifyPass.cpp
   JumpThreading.cpp
   LICM.cpp

diff  --git a/llvm/lib/Transforms/Scalar/InferAlignment.cpp b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
new file mode 100644
index 000000000000000..b75b8d486fbbe8b
--- /dev/null
+++ b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
@@ -0,0 +1,100 @@
+//===- InferAlignment.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Infer alignment for loads, stores and other memory operations based on
+// trailing zero known bits information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/InferAlignment.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+
+using namespace llvm;
+
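+// Run Fn on the pointer operand, current alignment, and preferred type
+// alignment of a load or store, and raise the access's alignment if Fn
+// returns a larger one. Returns true if the instruction was changed.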
+static bool tryToImproveAlign(
+    const DataLayout &DL, Instruction *I,
+    function_ref<Align(Value *PtrOp, Align OldAlign, Align PrefAlign)> Fn) {
+  if (auto *LI = dyn_cast<LoadInst>(I)) {
+    Value *PtrOp = LI->getPointerOperand();
+    Align OldAlign = LI->getAlign();
+    Align NewAlign = Fn(PtrOp, OldAlign, DL.getPrefTypeAlign(LI->getType()));
+    if (NewAlign > OldAlign) {
+      LI->setAlignment(NewAlign);
+      return true;
+    }
+  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
+    Value *PtrOp = SI->getPointerOperand();
+    Value *ValOp = SI->getValueOperand();
+    Align OldAlign = SI->getAlign();
+    Align NewAlign = Fn(PtrOp, OldAlign, DL.getPrefTypeAlign(ValOp->getType()));
+    if (NewAlign > OldAlign) {
+      SI->setAlignment(NewAlign);
+      return true;
+    }
+  }
+  // TODO: Also handle memory intrinsics.
+  return false;
+}
+
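+// Infer alignment for the loads and stores in F. Returns true if any
+// alignment was changed.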
+bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
+  const DataLayout &DL = F.getParent()->getDataLayout();
+  bool Changed = false;
+
+  // Enforce preferred type alignment if possible. We do this in a separate
+  // walk over the function first, because it may improve the alignments we
+  // infer from known bits below.
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      Changed |= tryToImproveAlign(
+          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
+            if (PrefAlign > OldAlign)
+              return std::max(OldAlign,
+                              tryEnforceAlignment(PtrOp, PrefAlign, DL));
+            return OldAlign;
+          });
+    }
+  }
+
+  // Compute alignment from known bits.
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      Changed |= tryToImproveAlign(
+          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
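+            // A pointer with TrailZ known trailing zero bits is aligned to
+            // 2^TrailZ; cap the exponent at the largest alignment LLVM
+            // supports.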
+            KnownBits Known = computeKnownBits(PtrOp, DL, 0, &AC, &I, &DT);
+            unsigned TrailZ = std::min(Known.countMinTrailingZeros(),
+                                       +Value::MaxAlignmentExponent);
+            return Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
+          });
+    }
+  }
+
+  return Changed;
+}
+
+PreservedAnalyses InferAlignmentPass::run(Function &F,
+                                          FunctionAnalysisManager &AM) {
+  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
+  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
+  inferAlignment(F, AC, DT);
+  // Changes to alignment shouldn't invalidate analyses.
+  return PreservedAnalyses::all();
+}

diff  --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index e11c4aac66ad9ef..ddb47e693a643d8 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1388,15 +1388,8 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
   return Changed;
 }
 
-/// If the specified pointer points to an object that we control, try to modify
-/// the object's alignment to PrefAlign. Returns a minimum known alignment of
-/// the value after the operation, which may be lower than PrefAlign.
-///
-/// Increating value alignment isn't often possible though. If alignment is
-/// important, a more reliable approach is to simply align all global variables
-/// and allocation instructions to their preferred alignment from the beginning.
-static Align tryEnforceAlignment(Value *V, Align PrefAlign,
-                                 const DataLayout &DL) {
+Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
+                                const DataLayout &DL) {
   V = V->stripPointerCasts();
 
   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {

diff  --git a/llvm/test/Transforms/InferAlignment/alloca.ll b/llvm/test/Transforms/InferAlignment/alloca.ll
index b64413336d347b0..986f291889a2ade 100644
--- a/llvm/test/Transforms/InferAlignment/alloca.ll
+++ b/llvm/test/Transforms/InferAlignment/alloca.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 ; ------------------------------------------------------------------------------
 ; Scalar type
@@ -8,11 +8,11 @@
 define void @alloca_local(i8 %x, i32 %y) {
 ; CHECK-LABEL: define void @alloca_local
 ; CHECK-SAME: (i8 [[X:%.*]], i32 [[Y:%.*]]) {
-; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 1
-; CHECK-NEXT:    [[LOAD_I8:%.*]] = load i8, ptr [[ALLOCA]], align 1
-; CHECK-NEXT:    [[LOAD_I32:%.*]] = load i32, ptr [[ALLOCA]], align 1
-; CHECK-NEXT:    store i8 [[X]], ptr [[ALLOCA]], align 1
-; CHECK-NEXT:    store i32 [[Y]], ptr [[ALLOCA]], align 1
+; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[LOAD_I8:%.*]] = load i8, ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    [[LOAD_I32:%.*]] = load i32, ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    store i8 [[X]], ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    store i32 [[Y]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca i32, align 1
@@ -38,10 +38,10 @@ define void @alloca_struct(i32 %x) {
 ; CHECK-NEXT:    [[ALLOCA_STRUCT:%.*]] = alloca [[STRUCT_PAIR:%.*]], align 8
 ; CHECK-NEXT:    [[GEP_0:%.*]] = getelementptr [[STRUCT_PAIR]], ptr [[ALLOCA_STRUCT]], i64 0, i32 1
 ; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr { i32, i32 }, ptr [[GEP_0]], i64 0, i32 1
-; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, ptr [[GEP_0]], align 1
-; CHECK-NEXT:    store i32 0, ptr [[GEP_0]], align 1
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, ptr [[GEP_1]], align 1
-; CHECK-NEXT:    store i32 0, ptr [[GEP_1]], align 1
+; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, ptr [[GEP_0]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[GEP_0]], align 8
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, ptr [[GEP_1]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[GEP_1]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %alloca.struct = alloca %struct.pair

diff  --git a/llvm/test/Transforms/InferAlignment/atomic.ll b/llvm/test/Transforms/InferAlignment/atomic.ll
index 23efc4381fd32fd..949e60a61edfad5 100644
--- a/llvm/test/Transforms/InferAlignment/atomic.ll
+++ b/llvm/test/Transforms/InferAlignment/atomic.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -S < %s -passes=no-op-function | FileCheck %s
+; RUN: opt -S < %s -passes=infer-alignment | FileCheck %s
 
 ; ------------------------------------------------------------------------------
 ; load/store of null
@@ -7,9 +7,9 @@
 
 define void @load_null() {
 ; CHECK-LABEL: define void @load_null() {
-; CHECK-NEXT:    [[X_0:%.*]] = load atomic i32, ptr null unordered, align 4
-; CHECK-NEXT:    [[X_1:%.*]] = load atomic i32, ptr null monotonic, align 4
-; CHECK-NEXT:    [[X_2:%.*]] = load atomic i32, ptr null seq_cst, align 4
+; CHECK-NEXT:    [[X_0:%.*]] = load atomic i32, ptr null unordered, align 4294967296
+; CHECK-NEXT:    [[X_1:%.*]] = load atomic i32, ptr null monotonic, align 4294967296
+; CHECK-NEXT:    [[X_2:%.*]] = load atomic i32, ptr null seq_cst, align 4294967296
 ; CHECK-NEXT:    ret void
 ;
   %x.0 = load atomic i32, ptr null unordered, align 4
@@ -20,9 +20,9 @@ define void @load_null() {
 
 define void @store_null() {
 ; CHECK-LABEL: define void @store_null() {
-; CHECK-NEXT:    store atomic i32 0, ptr null unordered, align 4
-; CHECK-NEXT:    store atomic i32 0, ptr null monotonic, align 4
-; CHECK-NEXT:    store atomic i32 0, ptr null seq_cst, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr null unordered, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null monotonic, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null seq_cst, align 4294967296
 ; CHECK-NEXT:    ret void
 ;
   store atomic i32 0, ptr null unordered, align 4
@@ -38,9 +38,9 @@ define void @store_null() {
 
 define void @load_nonnull() {
 ; CHECK-LABEL: define void @load_nonnull() {
-; CHECK-NEXT:    [[X_0:%.*]] = load atomic i32, ptr @c unordered, align 4
-; CHECK-NEXT:    [[X_1:%.*]] = load atomic i32, ptr @c monotonic, align 4
-; CHECK-NEXT:    [[X_2:%.*]] = load atomic i32, ptr @c seq_cst, align 4
+; CHECK-NEXT:    [[X_0:%.*]] = load atomic i32, ptr @c unordered, align 8
+; CHECK-NEXT:    [[X_1:%.*]] = load atomic i32, ptr @c monotonic, align 8
+; CHECK-NEXT:    [[X_2:%.*]] = load atomic i32, ptr @c seq_cst, align 8
 ; CHECK-NEXT:    ret void
 ;
   %x.0 = load atomic i32, ptr @c unordered, align 4
@@ -51,9 +51,9 @@ define void @load_nonnull() {
 
 define void @store_nonnull() {
 ; CHECK-LABEL: define void @store_nonnull() {
-; CHECK-NEXT:    store atomic i32 0, ptr @c unordered, align 4
-; CHECK-NEXT:    store atomic i32 0, ptr @c monotonic, align 4
-; CHECK-NEXT:    store atomic i32 0, ptr @c seq_cst, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr @c unordered, align 8
+; CHECK-NEXT:    store atomic i32 0, ptr @c monotonic, align 8
+; CHECK-NEXT:    store atomic i32 0, ptr @c seq_cst, align 8
 ; CHECK-NEXT:    ret void
 ;
   store atomic i32 0, ptr @c unordered, align 4
@@ -69,9 +69,9 @@ define void @store_nonnull() {
 define void @load_alloca() {
 ; CHECK-LABEL: define void @load_alloca() {
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[X_0:%.*]] = load atomic i32, ptr [[ALLOCA]] unordered, align 1
-; CHECK-NEXT:    [[X_1:%.*]] = load atomic i32, ptr [[ALLOCA]] monotonic, align 1
-; CHECK-NEXT:    [[X_2:%.*]] = load atomic i32, ptr [[ALLOCA]] seq_cst, align 1
+; CHECK-NEXT:    [[X_0:%.*]] = load atomic i32, ptr [[ALLOCA]] unordered, align 4
+; CHECK-NEXT:    [[X_1:%.*]] = load atomic i32, ptr [[ALLOCA]] monotonic, align 4
+; CHECK-NEXT:    [[X_2:%.*]] = load atomic i32, ptr [[ALLOCA]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca i32
@@ -84,9 +84,9 @@ define void @load_alloca() {
 define void @store_alloca() {
 ; CHECK-LABEL: define void @store_alloca() {
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store atomic i32 0, ptr [[ALLOCA]] unordered, align 1
-; CHECK-NEXT:    store atomic i32 0, ptr [[ALLOCA]] monotonic, align 1
-; CHECK-NEXT:    store atomic i32 0, ptr [[ALLOCA]] seq_cst, align 1
+; CHECK-NEXT:    store atomic i32 0, ptr [[ALLOCA]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[ALLOCA]] monotonic, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[ALLOCA]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca i32

diff  --git a/llvm/test/Transforms/InferAlignment/attributes.ll b/llvm/test/Transforms/InferAlignment/attributes.ll
index 6dce9a11d661fcd..c74dec9ac90d97c 100644
--- a/llvm/test/Transforms/InferAlignment/attributes.ll
+++ b/llvm/test/Transforms/InferAlignment/attributes.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 define void @attribute(ptr align 32 %a) {
 ; CHECK-LABEL: define void @attribute
 ; CHECK-SAME: (ptr align 32 [[A:%.*]]) {
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[A]], align 1
-; CHECK-NEXT:    store i32 123, ptr [[A]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[A]], align 32
+; CHECK-NEXT:    store i32 123, ptr [[A]], align 32
 ; CHECK-NEXT:    ret void
 ;
   %load = load i32, ptr %a, align 1
@@ -17,8 +17,8 @@ define void @attribute_through_call(ptr align 32 %a) {
 ; CHECK-LABEL: define void @attribute_through_call
 ; CHECK-SAME: (ptr align 32 [[A:%.*]]) {
 ; CHECK-NEXT:    [[RES:%.*]] = call ptr @call(ptr [[A]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[RES]], align 1
-; CHECK-NEXT:    store i32 123, ptr [[RES]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[RES]], align 32
+; CHECK-NEXT:    store i32 123, ptr [[RES]], align 32
 ; CHECK-NEXT:    ret void
 ;
   %res = call ptr @call(ptr %a)
@@ -31,8 +31,8 @@ define void @attribute_return_value(ptr %a) {
 ; CHECK-LABEL: define void @attribute_return_value
 ; CHECK-SAME: (ptr [[A:%.*]]) {
 ; CHECK-NEXT:    [[RES:%.*]] = call align 32 ptr @call(ptr [[A]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[RES]], align 1
-; CHECK-NEXT:    store i32 123, ptr [[RES]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[RES]], align 32
+; CHECK-NEXT:    store i32 123, ptr [[RES]], align 32
 ; CHECK-NEXT:    ret void
 ;
   %res = call align 32 ptr @call(ptr %a)

diff  --git a/llvm/test/Transforms/InferAlignment/gep-2d.ll b/llvm/test/Transforms/InferAlignment/gep-2d.ll
index b88a9be988ccf32..4ce9e11f401c8de 100644
--- a/llvm/test/Transforms/InferAlignment/gep-2d.ll
+++ b/llvm/test/Transforms/InferAlignment/gep-2d.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 ; A multi-dimensional array in a nested loop doing vector stores that
 ; aren't yet aligned. InferAlignment can understand the addressing in the
@@ -21,8 +21,8 @@ define void @nested_loop() {
 ; CHECK:       loop.inner:
 ; CHECK-NEXT:    [[J:%.*]] = phi i64 [ 0, [[LOOP_OUTER]] ], [ [[J_NEXT:%.*]], [[LOOP_INNER_TAIL:%.*]] ]
 ; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[GEP_1]], align 8
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load <2 x double>, ptr [[GEP_1]], align 8
+; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[GEP_1]], align 16
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load <2 x double>, ptr [[GEP_1]], align 16
 ; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 [[I]], i64 [[J]]
 ; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[GEP_2]], align 8
 ; CHECK-NEXT:    [[LOAD_2:%.*]] = load <2 x double>, ptr [[GEP_2]], align 8

diff  --git a/llvm/test/Transforms/InferAlignment/gep-array.ll b/llvm/test/Transforms/InferAlignment/gep-array.ll
index 76ba55eee649ea0..6f6051144b710f8 100644
--- a/llvm/test/Transforms/InferAlignment/gep-array.ll
+++ b/llvm/test/Transforms/InferAlignment/gep-array.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -passes=no-op-function -S < %s | FileCheck %s
+; RUN: opt -passes=infer-alignment -S < %s | FileCheck %s
 
 ; ------------------------------------------------------------------------------
 ; Array of pair
@@ -18,8 +18,8 @@ define void @simple_pair(i64 %idx) {
 ; CHECK-LABEL: define void @simple_pair
 ; CHECK-SAME: (i64 [[IDX:%.*]]) {
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [4 x %pair.simple], ptr @array.simple, i64 0, i64 [[IDX]], i32 1
-; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[GEP]], align 1
-; CHECK-NEXT:    store i32 0, ptr [[GEP]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[GEP]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[GEP]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %gep = getelementptr inbounds [4 x %pair.simple], ptr @array.simple, i64 0, i64 %idx, i32 1
@@ -39,11 +39,11 @@ define void @simple_pair(i64 %idx) {
 
 define void @load_nested() {
 ; CHECK-LABEL: define void @load_nested() {
-; CHECK-NEXT:    [[X_0:%.*]] = load i32, ptr @array.array, align 4
+; CHECK-NEXT:    [[X_0:%.*]] = load i32, ptr @array.array, align 16
 ; CHECK-NEXT:    [[X_1:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
-; CHECK-NEXT:    [[X_2:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4
+; CHECK-NEXT:    [[X_2:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 8
 ; CHECK-NEXT:    [[X_3:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
-; CHECK-NEXT:    [[X_4:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4
+; CHECK-NEXT:    [[X_4:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 16
 ; CHECK-NEXT:    ret void
 ;
   %x.0 =  load i32, ptr @array.array, align 4
@@ -56,11 +56,11 @@ define void @load_nested() {
 
 define void @store_nested() {
 ; CHECK-LABEL: define void @store_nested() {
-; CHECK-NEXT:    store i32 1, ptr @array.array, align 4
+; CHECK-NEXT:    store i32 1, ptr @array.array, align 16
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 8
 ; CHECK-NEXT:    store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4
+; CHECK-NEXT:    store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 16
 ; CHECK-NEXT:    ret void
 ;
   store i32 1, ptr @array.array, align 4

diff  --git a/llvm/test/Transforms/InferAlignment/irregular-size.ll b/llvm/test/Transforms/InferAlignment/irregular-size.ll
index caec3f55b61211e..9413c8ac5be46bc 100644
--- a/llvm/test/Transforms/InferAlignment/irregular-size.ll
+++ b/llvm/test/Transforms/InferAlignment/irregular-size.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 define void @non_pow2_size(i177 %X) {
 ; CHECK-LABEL: define void @non_pow2_size
 ; CHECK-SAME: (i177 [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = alloca i177, align 1
-; CHECK-NEXT:    [[L1:%.*]] = load i177, ptr [[A]], align 1
-; CHECK-NEXT:    store i177 [[X]], ptr [[A]], align 1
+; CHECK-NEXT:    [[A:%.*]] = alloca i177, align 8
+; CHECK-NEXT:    [[L1:%.*]] = load i177, ptr [[A]], align 8
+; CHECK-NEXT:    store i177 [[X]], ptr [[A]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %A = alloca i177, align 1
@@ -27,13 +27,13 @@ define void @load_vector_i4(i4 %X) {
 ; CHECK-NEXT:    [[PTR_2:%.*]] = getelementptr [16 x <2 x i4>], ptr @vector_i4, i64 0, i64 4
 ; CHECK-NEXT:    [[PTR_3:%.*]] = getelementptr [16 x <2 x i4>], ptr @vector_i4, i64 0, i64 8
 ; CHECK-NEXT:    [[RES_0:%.*]] = load i4, ptr [[PTR_0]], align 1
-; CHECK-NEXT:    [[RES_1:%.*]] = load i4, ptr [[PTR_1]], align 1
-; CHECK-NEXT:    [[RES_2:%.*]] = load i4, ptr [[PTR_2]], align 1
-; CHECK-NEXT:    [[RES_3:%.*]] = load i4, ptr [[PTR_3]], align 1
+; CHECK-NEXT:    [[RES_1:%.*]] = load i4, ptr [[PTR_1]], align 2
+; CHECK-NEXT:    [[RES_2:%.*]] = load i4, ptr [[PTR_2]], align 4
+; CHECK-NEXT:    [[RES_3:%.*]] = load i4, ptr [[PTR_3]], align 8
 ; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_0]], align 1
-; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_1]], align 1
-; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_2]], align 1
-; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_3]], align 1
+; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_1]], align 2
+; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_2]], align 4
+; CHECK-NEXT:    store i4 [[X]], ptr [[PTR_3]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %ptr.0 = getelementptr [16 x <2 x i4>], ptr @vector_i4, i64 0, i64 1

diff  --git a/llvm/test/Transforms/InferAlignment/propagate-assume.ll b/llvm/test/Transforms/InferAlignment/propagate-assume.ll
index a5c7afa0393baec..8cf0cb35035edd3 100644
--- a/llvm/test/Transforms/InferAlignment/propagate-assume.ll
+++ b/llvm/test/Transforms/InferAlignment/propagate-assume.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 ; ------------------------------------------------------------------------------
 ; Simple test
@@ -12,8 +12,8 @@ define void @simple_forwardpropagate(ptr %a) {
 ; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT:    store i32 345, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
+; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
 ; CHECK-NEXT:    ret void
 ;
   %ptrint = ptrtoint ptr %a to i64
@@ -30,8 +30,8 @@ define void @simple_forwardpropagate(ptr %a) {
 define void @simple_backpropagate(ptr %a) {
 ; CHECK-LABEL: define void @simple_backpropagate
 ; CHECK-SAME: (ptr [[A:%.*]]) {
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT:    store i32 345, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
+; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
 ; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
 ; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
@@ -53,8 +53,8 @@ define void @simple_forwardpropagate_bundle(ptr %a) {
 ; CHECK-LABEL: define void @simple_forwardpropagate_bundle
 ; CHECK-SAME: (ptr [[A:%.*]]) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT:    store i32 345, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
+; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)]
@@ -66,8 +66,8 @@ define void @simple_forwardpropagate_bundle(ptr %a) {
 define void @simple_backpropagate_bundle(ptr %a) {
 ; CHECK-LABEL: define void @simple_backpropagate_bundle
 ; CHECK-SAME: (ptr [[A:%.*]]) {
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT:    store i32 345, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
+; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
 ; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
 ; CHECK-NEXT:    ret void
 ;
@@ -97,10 +97,10 @@ define void @loop_forwardpropagate(ptr %a, ptr %b) {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
-; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 4
+; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 64
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD_B]], 1
 ; CHECK-NEXT:    [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
-; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP_A]], align 4
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP_A]], align 64
 ; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 16
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[I_NEXT]], 1648
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
@@ -149,10 +149,10 @@ define void @loop_forwardpropagate_bundle(ptr %a, ptr %b) {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
-; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 4
+; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 64
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD_B]], 1
 ; CHECK-NEXT:    [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
-; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP_A]], align 4
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP_A]], align 64
 ; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 16
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[I_NEXT]], 1648
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
@@ -190,11 +190,11 @@ define void @complex_backpropagate(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: define void @complex_backpropagate
 ; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
 ; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[B]], align 4
-; CHECK-NEXT:    store i32 [[LOAD_B]], ptr [[A]], align 4
+; CHECK-NEXT:    store i32 [[LOAD_B]], ptr [[A]], align 32
 ; CHECK-NEXT:    [[OBJ_SIZE:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[C]], i1 false, i1 false, i1 false)
-; CHECK-NEXT:    store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 8
 ; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
 ; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
@@ -222,11 +222,11 @@ define void @complex_backpropagate_bundle(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: define void @complex_backpropagate_bundle
 ; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
 ; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[B]], align 4
-; CHECK-NEXT:    store i32 [[LOAD_B]], ptr [[A]], align 4
+; CHECK-NEXT:    store i32 [[LOAD_B]], ptr [[A]], align 32
 ; CHECK-NEXT:    [[OBJ_SIZE:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[C]], i1 false, i1 false, i1 false)
-; CHECK-NEXT:    store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 8
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
 ; CHECK-NEXT:    ret void
 ;

diff  --git a/llvm/test/Transforms/InferAlignment/ptrmask.ll b/llvm/test/Transforms/InferAlignment/ptrmask.ll
index 1db2d093216484e..52a8bcecba13dee 100644
--- a/llvm/test/Transforms/InferAlignment/ptrmask.ll
+++ b/llvm/test/Transforms/InferAlignment/ptrmask.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 ; ------------------------------------------------------------------------------
 ; load instructions
@@ -11,9 +11,9 @@ define void @load(ptr align 1 %ptr) {
 ; CHECK-NEXT:    [[ALIGNED_0:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -2)
 ; CHECK-NEXT:    [[ALIGNED_1:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -4)
 ; CHECK-NEXT:    [[ALIGNED_2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8)
-; CHECK-NEXT:    [[LOAD_0:%.*]] = load <16 x i8>, ptr [[ALIGNED_0]], align 1
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load <16 x i8>, ptr [[ALIGNED_1]], align 1
-; CHECK-NEXT:    [[LOAD_2:%.*]] = load <16 x i8>, ptr [[ALIGNED_2]], align 1
+; CHECK-NEXT:    [[LOAD_0:%.*]] = load <16 x i8>, ptr [[ALIGNED_0]], align 2
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load <16 x i8>, ptr [[ALIGNED_1]], align 4
+; CHECK-NEXT:    [[LOAD_2:%.*]] = load <16 x i8>, ptr [[ALIGNED_2]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %aligned.0 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2)
@@ -37,9 +37,9 @@ define void @store(ptr align 1 %ptr) {
 ; CHECK-NEXT:    [[ALIGNED_0:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -2)
 ; CHECK-NEXT:    [[ALIGNED_1:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -4)
 ; CHECK-NEXT:    [[ALIGNED_2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8)
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_0]], align 1
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_1]], align 1
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_2]], align 1
+; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_0]], align 2
+; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_1]], align 4
+; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_2]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %aligned.0 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2)
@@ -62,8 +62,8 @@ define void @ptrmask_overaligned(ptr align 16 %ptr) {
 ; CHECK-LABEL: define void @ptrmask_overaligned
 ; CHECK-SAME: (ptr align 16 [[PTR:%.*]]) {
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 16
+; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)

diff  --git a/llvm/test/Transforms/InferAlignment/undef-and-null.ll b/llvm/test/Transforms/InferAlignment/undef-and-null.ll
index 76b751a4d411f94..86f6d62eca9e710 100644
--- a/llvm/test/Transforms/InferAlignment/undef-and-null.ll
+++ b/llvm/test/Transforms/InferAlignment/undef-and-null.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -passes=no-op-function -S < %s | FileCheck %s
+; RUN: opt -passes=infer-alignment -S < %s | FileCheck %s
 
 define void @load_undef_null(ptr %P) {
 ; CHECK-LABEL: define void @load_undef_null
 ; CHECK-SAME: (ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[RET_0:%.*]] = load i32, ptr undef, align 4
-; CHECK-NEXT:    [[RET_1:%.*]] = load i32, ptr null, align 4
+; CHECK-NEXT:    [[RET_1:%.*]] = load i32, ptr null, align 4294967296
 ; CHECK-NEXT:    ret void
 ;
   %ret.0 = load i32, ptr undef
@@ -17,7 +17,7 @@ define void @store_undef_null(ptr %P) {
 ; CHECK-LABEL: define void @store_undef_null
 ; CHECK-SAME: (ptr [[P:%.*]]) {
 ; CHECK-NEXT:    store i32 123, ptr undef, align 4
-; CHECK-NEXT:    store i32 124, ptr null, align 4
+; CHECK-NEXT:    store i32 124, ptr null, align 4294967296
 ; CHECK-NEXT:    ret void
 ;
   store i32 123, ptr undef

diff  --git a/llvm/test/Transforms/InferAlignment/vector.ll b/llvm/test/Transforms/InferAlignment/vector.ll
index 1599b583f32445a..e3dcfe346d7e71a 100644
--- a/llvm/test/Transforms/InferAlignment/vector.ll
+++ b/llvm/test/Transforms/InferAlignment/vector.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 ; InferAlignment should be able to prove vector alignment in the
 ; presence of a few mild address computation tricks.
@@ -12,8 +12,8 @@ define void @alloca(<2 x i64> %y) {
 ; CHECK-LABEL: define void @alloca
 ; CHECK-SAME: (<2 x i64> [[Y:%.*]]) {
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca <2 x i64>, align 16
-; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr [[ALLOCA]], align 1
-; CHECK-NEXT:    store <2 x i64> [[Y]], ptr [[ALLOCA]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr [[ALLOCA]], align 16
+; CHECK-NEXT:    store <2 x i64> [[Y]], ptr [[ALLOCA]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca <2 x i64>
@@ -31,8 +31,8 @@ define void @alloca(<2 x i64> %y) {
 define void @global(<2 x i64> %y) {
 ; CHECK-LABEL: define void @global
 ; CHECK-SAME: (<2 x i64> [[Y:%.*]]) {
-; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr @x.vector, align 1
-; CHECK-NEXT:    store <2 x i64> [[Y]], ptr @x.vector, align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr @x.vector, align 16
+; CHECK-NEXT:    store <2 x i64> [[Y]], ptr @x.vector, align 16
 ; CHECK-NEXT:    ret void
 ;
   %load = load <2 x i64>, ptr @x.vector, align 1
@@ -55,8 +55,8 @@ define void @vector_singular(i32 %i, <2 x i64> %y) {
 ; CHECK-LABEL: define void @vector_singular
 ; CHECK-SAME: (i32 [[I:%.*]], <2 x i64> [[Y:%.*]]) {
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr <2 x i64>, ptr @vector, i32 [[I]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 1
-; CHECK-NEXT:    store <2 x i64> [[Y]], ptr [[GEP]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 16
+; CHECK-NEXT:    store <2 x i64> [[Y]], ptr [[GEP]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %gep = getelementptr <2 x i64>, ptr @vector, i32 %i
@@ -73,8 +73,8 @@ define void @vector_array(i32 %i, i32 %j, <2 x i64> %y) {
 ; CHECK-LABEL: define void @vector_array
 ; CHECK-SAME: (i32 [[I:%.*]], i32 [[J:%.*]], <2 x i64> [[Y:%.*]]) {
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [13 x <2 x i64>], ptr @vector.arr, i32 [[I]], i32 [[J]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 1
-; CHECK-NEXT:    store <2 x i64> [[Y]], ptr [[GEP]], align 1
+; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 16
+; CHECK-NEXT:    store <2 x i64> [[Y]], ptr [[GEP]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %gep = getelementptr [13 x <2 x i64>], ptr @vector.arr, i32 %i, i32 %j
@@ -93,11 +93,11 @@ define void @vector_array(i32 %i, i32 %j, <2 x i64> %y) {
 
 define void @nonvector_array() {
 ; CHECK-LABEL: define void @nonvector_array() {
-; CHECK-NEXT:    [[LOAD_0:%.*]] = load <16 x i8>, ptr @x.array, align 1
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @x.array, align 1
+; CHECK-NEXT:    [[LOAD_0:%.*]] = load <16 x i8>, ptr @x.array, align 16
+; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @x.array, align 16
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [4 x i32], ptr @x.array, i16 0, i16 2
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load <16 x i8>, ptr [[GEP]], align 1
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[GEP]], align 1
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load <16 x i8>, ptr [[GEP]], align 8
+; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[GEP]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %load.0 = load <16 x i8>, ptr @x.array, align 1

diff  --git a/llvm/test/Transforms/InferAlignment/volatile.ll b/llvm/test/Transforms/InferAlignment/volatile.ll
index f2991b6fc2d2dbf..88ff0d0faf92221 100644
--- a/llvm/test/Transforms/InferAlignment/volatile.ll
+++ b/llvm/test/Transforms/InferAlignment/volatile.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s
 
 define void @load_volatile() {
 ; CHECK-LABEL: define void @load_volatile() {
 ; CHECK-NEXT:    [[A:%.*]] = alloca { i32 }, align 8
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LOAD_A:%.*]] = load volatile i32, ptr [[A]], align 4
+; CHECK-NEXT:    [[LOAD_A:%.*]] = load volatile i32, ptr [[A]], align 8
 ; CHECK-NEXT:    [[LOAD_B:%.*]] = load volatile i32, ptr [[B]], align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -20,7 +20,7 @@ define void @store_volatile() {
 ; CHECK-LABEL: define void @store_volatile() {
 ; CHECK-NEXT:    [[A:%.*]] = alloca { i32 }, align 8
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store volatile i32 123, ptr [[A]], align 4
+; CHECK-NEXT:    store volatile i32 123, ptr [[A]], align 8
 ; CHECK-NEXT:    store volatile i32 123, ptr [[B]], align 4
 ; CHECK-NEXT:    ret void
 ;

diff  --git a/llvm/test/Transforms/InferAlignment/vscale.ll b/llvm/test/Transforms/InferAlignment/vscale.ll
index 5152d50a6bb770f..8abac031c6fa498 100644
--- a/llvm/test/Transforms/InferAlignment/vscale.ll
+++ b/llvm/test/Transforms/InferAlignment/vscale.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -passes=no-op-function -S < %s | FileCheck %s
+; RUN: opt -passes=infer-alignment -S < %s | FileCheck %s
 
 ; <4 x i32> -> 16 byte alignment
 define void @alignment_sustain(ptr align 16 %ptr) {
@@ -23,8 +23,8 @@ define void @alignment_increase(ptr align 32 %ptr) {
 ; CHECK-LABEL: define void @alignment_increase
 ; CHECK-SAME: (ptr align 32 [[PTR:%.*]]) {
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr <vscale x 8 x i32>, ptr [[PTR]], i32 3
-; CHECK-NEXT:    [[LOAD:%.*]] = load <8 x i32>, ptr [[GEP]], align 16
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[GEP]], align 16
+; CHECK-NEXT:    [[LOAD:%.*]] = load <8 x i32>, ptr [[GEP]], align 32
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[GEP]], align 32
 ; CHECK-NEXT:    ret void
 ;
   %gep = getelementptr <vscale x 8 x i32>, ptr %ptr, i32 3
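
For reference, the pass is not yet enabled by default in the standard
pipelines; it is gated behind the new hidden flag added in
PassBuilderPipelines.cpp (which, per its description, also disables the
corresponding inference in InstCombine). A pipeline-level test would look
something like this untested sketch:

  ; RUN: opt < %s -passes='default<O3>' -enable-infer-alignment-pass -S | FileCheck %s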
