[llvm] 3e992d8 - [InferAlignment] Enable InferAlignment pass by default

Dhruv Chawla via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 19 23:42:40 PDT 2023


Author: Dhruv Chawla
Date: 2023-09-20T12:08:52+05:30
New Revision: 3e992d81afc3925a8685eb15f794dd4a6ba3e97e

URL: https://github.com/llvm/llvm-project/commit/3e992d81afc3925a8685eb15f794dd4a6ba3e97e
DIFF: https://github.com/llvm/llvm-project/commit/3e992d81afc3925a8685eb15f794dd4a6ba3e97e.diff

LOG: [InferAlignment] Enable InferAlignment pass by default

This gives an improvement of 0.6%:
https://llvm-compile-time-tracker.com/compare.php?from=7d35fe6d08e2b9b786e1c8454cd2391463832167&to=0456c8e8a42be06b62ad4c3e3cf34b21f2633d1e&stat=instructions:u

Differential Revision: https://reviews.llvm.org/D158600

Added: 
    

Modified: 
    llvm/lib/Passes/PassBuilderPipelines.cpp
    llvm/test/Analysis/BasicAA/featuretest.ll
    llvm/test/Analysis/ValueTracking/assume.ll
    llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt.ll
    llvm/test/CodeGen/AMDGPU/reqd-work-group-size.ll
    llvm/test/Other/new-pm-defaults.ll
    llvm/test/Other/new-pm-lto-defaults.ll
    llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
    llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
    llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
    llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
    llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
    llvm/test/Transforms/InstCombine/addrspacecast.ll
    llvm/test/Transforms/InstCombine/align-addr.ll
    llvm/test/Transforms/InstCombine/align-attr.ll
    llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
    llvm/test/Transforms/InstCombine/alloca.ll
    llvm/test/Transforms/InstCombine/apint-shift.ll
    llvm/test/Transforms/InstCombine/assume-align.ll
    llvm/test/Transforms/InstCombine/assume-loop-align.ll
    llvm/test/Transforms/InstCombine/assume.ll
    llvm/test/Transforms/InstCombine/assume_inevitable.ll
    llvm/test/Transforms/InstCombine/atomic.ll
    llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
    llvm/test/Transforms/InstCombine/constant-fold-gep.ll
    llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
    llvm/test/Transforms/InstCombine/fcmp-denormals-are-zero.ll
    llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
    llvm/test/Transforms/InstCombine/gep-custom-dl.ll
    llvm/test/Transforms/InstCombine/getelementptr.ll
    llvm/test/Transforms/InstCombine/load-cmp.ll
    llvm/test/Transforms/InstCombine/load-combine-metadata-dominance.ll
    llvm/test/Transforms/InstCombine/load.ll
    llvm/test/Transforms/InstCombine/loadstore-alignment.ll
    llvm/test/Transforms/InstCombine/memcpy-from-global.ll
    llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
    llvm/test/Transforms/InstCombine/phi.ll
    llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
    llvm/test/Transforms/InstCombine/pr44552.ll
    llvm/test/Transforms/InstCombine/pr59613.ll
    llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
    llvm/test/Transforms/InstCombine/select.ll
    llvm/test/Transforms/InstCombine/store.ll
    llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
    llvm/test/Transforms/InstCombine/vscale_gep.ll
    llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
    llvm/test/Transforms/LoopVectorize/X86/small-size.ll
    llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
    llvm/test/Transforms/LoopVectorize/non-const-n.ll
    llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll

Removed: 
    llvm/test/Transforms/InstCombine/align-2d-gep.ll


################################################################################
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 529743cc8bd2e39..29cf2f75fd6ca3f 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -276,7 +276,7 @@ cl::opt<bool> EnableMemProfContextDisambiguation(
     cl::ZeroOrMore, cl::desc("Enable MemProf context disambiguation"));
 
 cl::opt<bool> EnableInferAlignmentPass(
-    "enable-infer-alignment-pass", cl::init(false), cl::Hidden, cl::ZeroOrMore,
+    "enable-infer-alignment-pass", cl::init(true), cl::Hidden, cl::ZeroOrMore,
     cl::desc("Enable the InferAlignment pass, disabling alignment inference in "
              "InstCombine"));
 

diff --git a/llvm/test/Analysis/BasicAA/featuretest.ll b/llvm/test/Analysis/BasicAA/featuretest.ll
index f78fa7cf44eda57..f556c95747a19c7 100644
--- a/llvm/test/Analysis/BasicAA/featuretest.ll
+++ b/llvm/test/Analysis/BasicAA/featuretest.ll
@@ -131,7 +131,7 @@ define i32 @gep_distance_test3(ptr %A) {
 ;
 ; USE_ASSUME-LABEL: @gep_distance_test3(
 ; USE_ASSUME-NEXT:    [[C:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; USE_ASSUME-NEXT:    store i8 42, ptr [[C]], align 4
+; USE_ASSUME-NEXT:    store i8 42, ptr [[C]], align 1
 ; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[A]], i64 4), "nonnull"(ptr [[A]]), "align"(ptr [[A]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;

diff --git a/llvm/test/Analysis/ValueTracking/assume.ll b/llvm/test/Analysis/ValueTracking/assume.ll
index a5533a4f1db100e..cc098e10138321d 100644
--- a/llvm/test/Analysis/ValueTracking/assume.ll
+++ b/llvm/test/Analysis/ValueTracking/assume.ll
@@ -100,7 +100,7 @@ define dso_local i32 @test4a(ptr readonly %0, i1 %cond) {
 ; CHECK:       A:
 ; CHECK-NEXT:    br i1 false, label [[TMP4:%.*]], label [[TMP2:%.*]]
 ; CHECK:       2:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    br label [[TMP4]]
 ; CHECK:       4:
 ; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP3]], [[TMP2]] ], [ poison, [[A]] ]

diff --git a/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt.ll b/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt.ll
index 7774677b09ef962..954218f339fa239 100644
--- a/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine,infer-alignment %s | FileCheck -enable-var-scope -check-prefix=GCN %s
 
 ; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
 define amdgpu_kernel void @get_local_size_x(ptr addrspace(1) %out) #0 {

diff --git a/llvm/test/CodeGen/AMDGPU/reqd-work-group-size.ll b/llvm/test/CodeGen/AMDGPU/reqd-work-group-size.ll
index f9f817302b6dab9..ecdc3845efc4b7f 100644
--- a/llvm/test/CodeGen/AMDGPU/reqd-work-group-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/reqd-work-group-size.ll
@@ -1,5 +1,5 @@
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine %s | FileCheck -enable-var-scope %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine %s | FileCheck -enable-var-scope %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine,infer-alignment %s | FileCheck -enable-var-scope %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine,infer-alignment %s | FileCheck -enable-var-scope %s
 
 target datalayout = "n32"
 

diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 016dfad98c69f70..ad8fab8bad3b73e 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -246,6 +246,7 @@
 ; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis on foo
 ; CHECK-O-NEXT: Running pass: InjectTLIMappings
 ; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
 ; CHECK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-O-NEXT: Running pass: SimplifyCFGPass
@@ -257,6 +258,7 @@
 ; CHECK-O-NEXT: Running pass: LoopUnrollPass
 ; CHECK-O-NEXT: Running pass: WarnMissedTransformationsPass
 ; CHECK-O-NEXT: Running pass: SROAPass
+; CHECK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-O-NEXT: Running pass: LoopSimplifyPass
 ; CHECK-O-NEXT: Running pass: LCSSAPass

diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll
index c444197e0db7062..63ea58caa5b4a62 100644
--- a/llvm/test/Other/new-pm-lto-defaults.ll
+++ b/llvm/test/Other/new-pm-lto-defaults.ll
@@ -116,6 +116,7 @@
 ; CHECK-O23SZ-NEXT: Running analysis: LoopAccessAnalysis on foo
 ; CHECK-O23SZ-NEXT: Running pass: LoopVectorizePass on foo
 ; CHECK-O23SZ-NEXT: Running analysis: DemandedBitsAnalysis on foo
+; CHECK-O23SZ-NEXT: Running pass: InferAlignmentPass on foo
 ; CHECK-O23SZ-NEXT: Running pass: LoopUnrollPass on foo
 ; CHECK-O23SZ-NEXT: WarnMissedTransformationsPass on foo
 ; CHECK-O23SZ-NEXT: Running pass: SROAPass on foo
@@ -128,6 +129,7 @@
 ; CHECK-O3-NEXT: Running pass: SLPVectorizerPass on foo
 ; CHECK-OS-NEXT: Running pass: SLPVectorizerPass on foo
 ; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass on foo
+; CHECK-O23SZ-NEXT: Running pass: InferAlignmentPass on foo
 ; CHECK-O23SZ-NEXT: Running pass: InstCombinePass on foo
 ; CHECK-O23SZ-NEXT: Running pass: LoopSimplifyPass
 ; CHECK-O23SZ-NEXT: Running pass: LCSSAPass

diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index 79010c3eb808044..46d00a083b92cea 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -173,6 +173,7 @@
 ; CHECK-POSTLINK-O-NEXT: Running analysis: LoopAccessAnalysis on foo
 ; CHECK-POSTLINK-O-NEXT: Running pass: InjectTLIMappings
 ; CHECK-POSTLINK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-POSTLINK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: LoopLoadEliminationPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-POSTLINK-O-NEXT: Running pass: SimplifyCFGPass
@@ -184,6 +185,7 @@
 ; CHECK-POSTLINK-O-NEXT: Running pass: LoopUnrollPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: WarnMissedTransformationsPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: SROAPass
+; CHECK-POSTLINK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-POSTLINK-O-NEXT: Running pass: LoopSimplifyPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: LCSSAPass

diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index bfa3ed6e4b757dc..d0e1b8fb9ab30a9 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -160,6 +160,7 @@
 ; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis on foo
 ; CHECK-O-NEXT: Running pass: InjectTLIMappings
 ; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
 ; CHECK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-O-NEXT: Running pass: SimplifyCFGPass
@@ -171,6 +172,7 @@
 ; CHECK-O-NEXT: Running pass: LoopUnrollPass
 ; CHECK-O-NEXT: Running pass: WarnMissedTransformationsPass
 ; CHECK-O-NEXT: Running pass: SROAPass
+; CHECK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-O-NEXT: Running pass: LoopSimplifyPass
 ; CHECK-O-NEXT: Running pass: LCSSAPass

diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index 004ec790e984749..4b033ad238e2fb7 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -167,6 +167,7 @@
 ; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis
 ; CHECK-O-NEXT: Running pass: InjectTLIMappings
 ; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
 ; CHECK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-O-NEXT: Running pass: SimplifyCFGPass
@@ -178,6 +179,7 @@
 ; CHECK-O-NEXT: Running pass: LoopUnrollPass
 ; CHECK-O-NEXT: Running pass: WarnMissedTransformationsPass
 ; CHECK-O-NEXT: Running pass: SROAPass
+; CHECK-O-NEXT: Running pass: InferAlignmentPass
 ; CHECK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-O-NEXT: Running pass: LoopSimplifyPass
 ; CHECK-O-NEXT: Running pass: LCSSAPass

diff --git a/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll b/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
index 00bce165efa2ae3..dee71b2290acfcb 100644
--- a/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
+++ b/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
@@ -12,10 +12,10 @@ define i32 @bar(i64 %key_token2) nounwind {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[IOSPEC:%.*]] = alloca [[STRUCT_KEY:%.*]], align 8
 ; CHECK-NEXT:    [[RET:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 0, ptr [[IOSPEC]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[IOSPEC]], align 4
 ; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds { i32, i32 }, ptr [[IOSPEC]], i32 0, i32 1
 ; CHECK-NEXT:    store i32 0, ptr [[TMP0]], align 4
-; CHECK-NEXT:    store i64 [[KEY_TOKEN2:%.*]], ptr [[IOSPEC]], align 8
+; CHECK-NEXT:    store i64 [[KEY_TOKEN2:%.*]], ptr [[IOSPEC]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 (...) @foo(ptr nonnull byval([[STRUCT_KEY]]) align 4 [[IOSPEC]], ptr nonnull [[RET]]) #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[RET]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP2]]

diff --git a/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll b/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
index 4316018bb4e3785..5ed8d9507ca78e6 100644
--- a/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
+++ b/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
@@ -27,59 +27,59 @@ define ptr @_Z3fooRSt6vectorIiSaIiEE(ptr %X) {
 ; IC-NEXT:    [[TMP1:%.*]] = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", ptr [[X:%.*]], i32 0, i32 1
 ; IC-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 4
 ; IC-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[X]], align 4
-; IC-NEXT:    store ptr [[TMP3]], ptr [[__FIRST_ADDR_I_I]], align 8
-; IC-NEXT:    store ptr [[TMP2]], ptr [[__LAST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP3]], ptr [[__FIRST_ADDR_I_I]], align 4
+; IC-NEXT:    store ptr [[TMP2]], ptr [[__LAST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i32
 ; IC-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[TMP3]] to i32
 ; IC-NEXT:    [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]]
 ; IC-NEXT:    [[TMP7:%.*]] = ashr i32 [[TMP6]], 4
 ; IC-NEXT:    br label [[BB12_I_I:%.*]]
 ; IC:       bb.i.i:
-; IC-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
 ; IC-NEXT:    [[TMP10:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP9]], [[TMP10]]
 ; IC-NEXT:    br i1 [[TMP11]], label [[BB1_I_I:%.*]], label [[BB2_I_I:%.*]]
 ; IC:       bb1.i.i:
-; IC-NEXT:    [[TMP12:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP12:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT:%.*]]
 ; IC:       bb2.i.i:
-; IC-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 1
-; IC-NEXT:    store ptr [[TMP14]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP14]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
 ; IC-NEXT:    [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
 ; IC-NEXT:    br i1 [[TMP17]], label [[BB4_I_I:%.*]], label [[BB5_I_I:%.*]]
 ; IC:       bb4.i.i:
-; IC-NEXT:    [[TMP18:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP18:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb5.i.i:
-; IC-NEXT:    [[TMP19:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP19:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP20:%.*]] = getelementptr i32, ptr [[TMP19]], i32 1
-; IC-NEXT:    store ptr [[TMP20]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP20]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
 ; IC-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP23:%.*]] = icmp eq i32 [[TMP21]], [[TMP22]]
 ; IC-NEXT:    br i1 [[TMP23]], label [[BB7_I_I:%.*]], label [[BB8_I_I:%.*]]
 ; IC:       bb7.i.i:
-; IC-NEXT:    [[TMP24:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP24:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb8.i.i:
-; IC-NEXT:    [[TMP25:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP25:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP26:%.*]] = getelementptr i32, ptr [[TMP25]], i32 1
-; IC-NEXT:    store ptr [[TMP26]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP26]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
 ; IC-NEXT:    [[TMP28:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP27]], [[TMP28]]
 ; IC-NEXT:    br i1 [[TMP29]], label [[BB10_I_I:%.*]], label [[BB11_I_I:%.*]]
 ; IC:       bb10.i.i:
-; IC-NEXT:    [[TMP30:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP30:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb11.i.i:
-; IC-NEXT:    [[TMP31:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP31:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP32:%.*]] = getelementptr i32, ptr [[TMP31]], i32 1
-; IC-NEXT:    store ptr [[TMP32]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP32]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP33:%.*]] = add i32 [[__TRIP_COUNT_0_I_I:%.*]], -1
 ; IC-NEXT:    br label [[BB12_I_I]]
 ; IC:       bb12.i.i:
@@ -87,9 +87,9 @@ define ptr @_Z3fooRSt6vectorIiSaIiEE(ptr %X) {
 ; IC-NEXT:    [[TMP34:%.*]] = icmp sgt i32 [[__TRIP_COUNT_0_I_I]], 0
 ; IC-NEXT:    br i1 [[TMP34]], label [[BB_I_I:%.*]], label [[BB13_I_I:%.*]]
 ; IC:       bb13.i.i:
-; IC-NEXT:    [[TMP35:%.*]] = load ptr, ptr [[__LAST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP35:%.*]] = load ptr, ptr [[__LAST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP36:%.*]] = ptrtoint ptr [[TMP35]] to i32
-; IC-NEXT:    [[TMP37:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP37:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP37]] to i32
 ; IC-NEXT:    [[TMP39:%.*]] = sub i32 [[TMP36]], [[TMP38]]
 ; IC-NEXT:    [[TMP40:%.*]] = ashr i32 [[TMP39]], 2
@@ -99,49 +99,49 @@ define ptr @_Z3fooRSt6vectorIiSaIiEE(ptr %X) {
 ; IC-NEXT:    i32 3, label [[BB14_I_I:%.*]]
 ; IC-NEXT:    ]
 ; IC:       bb14.i.i:
-; IC-NEXT:    [[TMP41:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP41:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP42:%.*]] = load i32, ptr [[TMP41]], align 4
 ; IC-NEXT:    [[TMP43:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP44:%.*]] = icmp eq i32 [[TMP42]], [[TMP43]]
 ; IC-NEXT:    br i1 [[TMP44]], label [[BB16_I_I:%.*]], label [[BB17_I_I:%.*]]
 ; IC:       bb16.i.i:
-; IC-NEXT:    [[TMP45:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP45:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb17.i.i:
-; IC-NEXT:    [[TMP46:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP46:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP47:%.*]] = getelementptr i32, ptr [[TMP46]], i32 1
-; IC-NEXT:    store ptr [[TMP47]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP47]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[BB18_I_I]]
 ; IC:       bb18.i.i:
-; IC-NEXT:    [[TMP48:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP48:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4
 ; IC-NEXT:    [[TMP50:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP51:%.*]] = icmp eq i32 [[TMP49]], [[TMP50]]
 ; IC-NEXT:    br i1 [[TMP51]], label [[BB20_I_I:%.*]], label [[BB21_I_I:%.*]]
 ; IC:       bb20.i.i:
-; IC-NEXT:    [[TMP52:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP52:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb21.i.i:
-; IC-NEXT:    [[TMP53:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP53:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP54:%.*]] = getelementptr i32, ptr [[TMP53]], i32 1
-; IC-NEXT:    store ptr [[TMP54]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP54]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[BB22_I_I]]
 ; IC:       bb22.i.i:
-; IC-NEXT:    [[TMP55:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP55:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4
 ; IC-NEXT:    [[TMP57:%.*]] = load i32, ptr [[TMP0]], align 4
 ; IC-NEXT:    [[TMP58:%.*]] = icmp eq i32 [[TMP56]], [[TMP57]]
 ; IC-NEXT:    br i1 [[TMP58]], label [[BB24_I_I:%.*]], label [[BB25_I_I:%.*]]
 ; IC:       bb24.i.i:
-; IC-NEXT:    [[TMP59:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP59:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb25.i.i:
-; IC-NEXT:    [[TMP60:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP60:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    [[TMP61:%.*]] = getelementptr i32, ptr [[TMP60]], i32 1
-; IC-NEXT:    store ptr [[TMP61]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP61]], ptr [[__FIRST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[BB26_I_I]]
 ; IC:       bb26.i.i:
-; IC-NEXT:    [[TMP62:%.*]] = load ptr, ptr [[__LAST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP62:%.*]] = load ptr, ptr [[__LAST_ADDR_I_I]], align 4
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       _ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit:
 ; IC-NEXT:    [[DOT0_0_I_I:%.*]] = phi ptr [ [[TMP62]], [[BB26_I_I]] ], [ [[TMP59]], [[BB24_I_I]] ], [ [[TMP52]], [[BB20_I_I]] ], [ [[TMP45]], [[BB16_I_I]] ], [ [[TMP30]], [[BB10_I_I]] ], [ [[TMP24]], [[BB7_I_I]] ], [ [[TMP18]], [[BB4_I_I]] ], [ [[TMP12]], [[BB1_I_I]] ]

diff --git a/llvm/test/Transforms/InstCombine/addrspacecast.ll b/llvm/test/Transforms/InstCombine/addrspacecast.ll
index 6b48cfb8fc4a59f..cbb88b9a09c93e7 100644
--- a/llvm/test/Transforms/InstCombine/addrspacecast.ll
+++ b/llvm/test/Transforms/InstCombine/addrspacecast.ll
@@ -173,7 +173,7 @@ end:
 
 define void @constant_fold_null() #0 {
 ; CHECK-LABEL: @constant_fold_null(
-; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), align 4294967296
+; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), align 4
 ; CHECK-NEXT:    ret void
 ;
   %cast = addrspacecast ptr addrspace(3) null to ptr addrspace(4)

diff --git a/llvm/test/Transforms/InstCombine/align-2d-gep.ll b/llvm/test/Transforms/InstCombine/align-2d-gep.ll
deleted file mode 100644
index a2606fba2bf83ba..000000000000000
--- a/llvm/test/Transforms/InstCombine/align-2d-gep.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-
-; A multi-dimensional array in a nested loop doing vector stores that
-; aren't yet aligned. Instcombine can understand the addressing in the
-; Nice case to prove 16 byte alignment. In the Awkward case, the inner
-; array dimension is not even, so the stores to it won't always be
-; aligned. Instcombine should prove alignment in exactly one of the two
-; stores.
-
- at Nice    = global [1001 x [20000 x double]] zeroinitializer, align 32
- at Awkward = global [1001 x [20001 x double]] zeroinitializer, align 32
-
-define void @foo() nounwind  {
-; CHECK-LABEL: @foo(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    br label [[BB7_OUTER:%.*]]
-; CHECK:       bb7.outer:
-; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT26:%.*]], [[BB11:%.*]] ]
-; CHECK-NEXT:    br label [[BB1:%.*]]
-; CHECK:       bb1:
-; CHECK-NEXT:    [[J:%.*]] = phi i64 [ 0, [[BB7_OUTER]] ], [ [[INDVAR_NEXT:%.*]], [[BB1]] ]
-; CHECK-NEXT:    [[T4:%.*]] = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[T4]], align 16
-; CHECK-NEXT:    [[S4:%.*]] = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[S4]], align 8
-; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[J]], 2
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 556
-; CHECK-NEXT:    br i1 [[EXITCOND]], label [[BB11]], label [[BB1]]
-; CHECK:       bb11:
-; CHECK-NEXT:    [[INDVAR_NEXT26]] = add i64 [[I]], 1
-; CHECK-NEXT:    [[EXITCOND27:%.*]] = icmp eq i64 [[INDVAR_NEXT26]], 991
-; CHECK-NEXT:    br i1 [[EXITCOND27]], label [[RETURN_SPLIT:%.*]], label [[BB7_OUTER]]
-; CHECK:       return.split:
-; CHECK-NEXT:    ret void
-;
-entry:
-  br label %bb7.outer
-
-bb7.outer:
-  %i = phi i64 [ 0, %entry ], [ %indvar.next26, %bb11 ]
-  br label %bb1
-
-bb1:
-  %j = phi i64 [ 0, %bb7.outer ], [ %indvar.next, %bb1 ]
-
-  %t4 = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 %i, i64 %j
-  store <2 x double><double 0.0, double 0.0>, ptr %t4, align 8
-
-  %s4 = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 %i, i64 %j
-  store <2 x double><double 0.0, double 0.0>, ptr %s4, align 8
-
-  %indvar.next = add i64 %j, 2
-  %exitcond = icmp eq i64 %indvar.next, 556
-  br i1 %exitcond, label %bb11, label %bb1
-
-bb11:
-  %indvar.next26 = add i64 %i, 1
-  %exitcond27 = icmp eq i64 %indvar.next26, 991
-  br i1 %exitcond27, label %return.split, label %bb7.outer
-
-return.split:
-  ret void
-}

diff --git a/llvm/test/Transforms/InstCombine/align-addr.ll b/llvm/test/Transforms/InstCombine/align-addr.ll
index d02f4ef800ef9bc..23f620310d7c26c 100644
--- a/llvm/test/Transforms/InstCombine/align-addr.ll
+++ b/llvm/test/Transforms/InstCombine/align-addr.ll
@@ -2,9 +2,6 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-; Instcombine should be able to prove vector alignment in the
-; presence of a few mild address computation tricks.
-
 define void @test0(ptr %b, i64 %n, i64 %u, i64 %y) nounwind  {
 ; CHECK-LABEL: @test0(
 ; CHECK-NEXT:  entry:
@@ -20,7 +17,7 @@ define void @test0(ptr %b, i64 %n, i64 %u, i64 %y) nounwind  {
 ; CHECK-NEXT:    [[J:%.*]] = mul i64 [[I]], [[V]]
 ; CHECK-NEXT:    [[H:%.*]] = add i64 [[J]], [[Z]]
 ; CHECK-NEXT:    [[T8:%.*]] = getelementptr double, ptr [[E]], i64 [[H]]
-; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[T8]], align 16
+; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[T8]], align 8
 ; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[I]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
@@ -58,7 +55,7 @@ return:
 define <16 x i8> @test1(<2 x i64> %x) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, ptr @GLOBAL, align 16
+; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, ptr @GLOBAL, align 1
 ; CHECK-NEXT:    ret <16 x i8> [[TMP]]
 ;
 entry:
@@ -70,7 +67,7 @@ entry:
 
 define <16 x i8> @test1_as1(<2 x i64> %x) {
 ; CHECK-LABEL: @test1_as1(
-; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) @GLOBAL_as1, align 16
+; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) @GLOBAL_as1, align 1
 ; CHECK-NEXT:    ret <16 x i8> [[TMP]]
 ;
   %tmp = load <16 x i8>, ptr addrspace(1) @GLOBAL_as1, align 1
@@ -81,7 +78,7 @@ define <16 x i8> @test1_as1(<2 x i64> %x) {
 
 define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
 ; CHECK-LABEL: @test1_as1_gep(
-; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) getelementptr inbounds ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i32 0, i32 4), align 16
+; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) getelementptr inbounds ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i32 0, i32 4), align 1
 ; CHECK-NEXT:    ret <16 x i8> [[TMP]]
 ;
   %tmp = load <16 x i8>, ptr addrspace(1) getelementptr ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i16 0, i16 4), align 1
@@ -138,7 +135,7 @@ define <16 x i8> @ptrmask_align_unknown_ptr_align1(ptr align 1 %ptr, i64 %mask)
 define <16 x i8> @ptrmask_align_unknown_ptr_align8(ptr align 8 %ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_align_unknown_ptr_align8(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 %mask)
@@ -150,7 +147,7 @@ define <16 x i8> @ptrmask_align_unknown_ptr_align8(ptr align 8 %ptr, i64 %mask)
 define <16 x i8> @ptrmask_align2_ptr_align1(ptr align 1 %ptr) {
 ; CHECK-LABEL: @ptrmask_align2_ptr_align1(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -2)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 2
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2)
@@ -162,7 +159,7 @@ define <16 x i8> @ptrmask_align2_ptr_align1(ptr align 1 %ptr) {
 define <16 x i8> @ptrmask_align4_ptr_align1(ptr align 1 %ptr) {
 ; CHECK-LABEL: @ptrmask_align4_ptr_align1(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -4)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 4
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -4)
@@ -174,7 +171,7 @@ define <16 x i8> @ptrmask_align4_ptr_align1(ptr align 1 %ptr) {
 define <16 x i8> @ptrmask_align8_ptr_align1(ptr align 1 %ptr) {
 ; CHECK-LABEL: @ptrmask_align8_ptr_align1(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)
@@ -187,7 +184,7 @@ define <16 x i8> @ptrmask_align8_ptr_align1(ptr align 1 %ptr) {
 define <16 x i8> @ptrmask_align8_ptr_align8(ptr align 8 %ptr) {
 ; CHECK-LABEL: @ptrmask_align8_ptr_align8(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)
@@ -200,7 +197,7 @@ define <16 x i8> @ptrmask_align8_ptr_align8(ptr align 8 %ptr) {
 define <16 x i8> @ptrmask_align8_ptr_align16(ptr align 16 %ptr) {
 ; CHECK-LABEL: @ptrmask_align8_ptr_align16(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 16
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)
@@ -213,7 +210,7 @@ define <16 x i8> @ptrmask_align8_ptr_align16(ptr align 16 %ptr) {
 define <16 x i8> @ptrmask_align8_ptr_align1_smallmask(ptr align 1 %ptr) {
 ; CHECK-LABEL: @ptrmask_align8_ptr_align1_smallmask(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[PTR:%.*]], i32 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i32(ptr %ptr, i32 -8)
@@ -226,7 +223,7 @@ define <16 x i8> @ptrmask_align8_ptr_align1_smallmask(ptr align 1 %ptr) {
 define <16 x i8> @ptrmask_align8_ptr_align1_bigmask(ptr align 1 %ptr) {
 ; CHECK-LABEL: @ptrmask_align8_ptr_align1_bigmask(
 ; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i128(ptr [[PTR:%.*]], i128 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1
 ; CHECK-NEXT:    ret <16 x i8> [[LOAD]]
 ;
   %aligned = call ptr @llvm.ptrmask.p0.i128(ptr %ptr, i128 -8)

diff  --git a/llvm/test/Transforms/InstCombine/align-attr.ll b/llvm/test/Transforms/InstCombine/align-attr.ll
index 687aa604a4438d8..e7b17e72a81712a 100644
--- a/llvm/test/Transforms/InstCombine/align-attr.ll
+++ b/llvm/test/Transforms/InstCombine/align-attr.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
 define i32 @foo1(ptr align 32 %a) #0 {
 ; CHECK-LABEL: @foo1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
@@ -20,7 +20,7 @@ define i32 @foo2(ptr align 32 %a) #0 {
 ; CHECK-LABEL: @foo2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[V:%.*]] = call ptr @func1(ptr [[A:%.*]])
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:

diff  --git a/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll b/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
index 6b7725098535e5e..9c0f7ec04d4a20e 100644
--- a/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
+++ b/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
@@ -26,10 +26,10 @@ target triple = "x86_64-pc-windows-msvc19.11.25508"
 define void @f(ptr %p) !dbg !11 {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOCAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
+; CHECK-NEXT:    [[LOCAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
 ; CHECK-NEXT:    call void @llvm.dbg.declare(metadata ptr [[LOCAL]], metadata [[META22:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]]
 ; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[P:%.*]], align 8, !dbg [[DBG24:![0-9]+]], !tbaa [[TBAA25:![0-9]+]]
-; CHECK-NEXT:    store i64 [[TMP0]], ptr [[LOCAL]], align 8, !dbg [[DBG29:![0-9]+]], !tbaa [[TBAA25]]
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[LOCAL]], align 4, !dbg [[DBG29:![0-9]+]], !tbaa [[TBAA25]]
 ; CHECK-NEXT:    call void @escape(ptr nonnull [[LOCAL]]), !dbg [[DBG30:![0-9]+]]
 ; CHECK-NEXT:    ret void, !dbg [[DBG31:![0-9]+]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/alloca.ll b/llvm/test/Transforms/InstCombine/alloca.ll
index 24129b0a1986d24..a64de28ee839773 100644
--- a/llvm/test/Transforms/InstCombine/alloca.ll
+++ b/llvm/test/Transforms/InstCombine/alloca.ll
@@ -132,7 +132,7 @@ define void @test6() {
 ; NODL-NEXT:  entry:
 ; NODL-NEXT:    [[A:%.*]] = alloca { i32 }, align 8
 ; NODL-NEXT:    [[B:%.*]] = alloca i32, align 4
-; NODL-NEXT:    store volatile i32 123, ptr [[A]], align 8
+; NODL-NEXT:    store volatile i32 123, ptr [[A]], align 4
 ; NODL-NEXT:    tail call void @f(ptr nonnull [[B]])
 ; NODL-NEXT:    ret void
 ;
@@ -186,13 +186,29 @@ declare ptr @llvm.stacksave()
 declare void @llvm.stackrestore(ptr)
 
 define void @test9(ptr %a) {
-; ALL-LABEL: @test9(
-; ALL-NEXT:  entry:
-; ALL-NEXT:    [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_TYPE:%.*]] }>, align 8
-; ALL-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A:%.*]], align 4
-; ALL-NEXT:    store i64 [[TMP0]], ptr [[ARGMEM]], align 8
-; ALL-NEXT:    call void @test9_aux(ptr nonnull inalloca(<{ [[STRUCT_TYPE]] }>) [[ARGMEM]])
-; ALL-NEXT:    ret void
+; CHECK-LABEL: @test9(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_TYPE:%.*]] }>, align 1
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[ARGMEM]], align 4
+; CHECK-NEXT:    call void @test9_aux(ptr nonnull inalloca(<{ [[STRUCT_TYPE]] }>) [[ARGMEM]])
+; CHECK-NEXT:    ret void
+;
+; P32-LABEL: @test9(
+; P32-NEXT:  entry:
+; P32-NEXT:    [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_TYPE:%.*]] }>, align 1
+; P32-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A:%.*]], align 4
+; P32-NEXT:    store i64 [[TMP0]], ptr [[ARGMEM]], align 4
+; P32-NEXT:    call void @test9_aux(ptr nonnull inalloca(<{ [[STRUCT_TYPE]] }>) [[ARGMEM]])
+; P32-NEXT:    ret void
+;
+; NODL-LABEL: @test9(
+; NODL-NEXT:  entry:
+; NODL-NEXT:    [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_TYPE:%.*]] }>, align 8
+; NODL-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A:%.*]], align 4
+; NODL-NEXT:    store i64 [[TMP0]], ptr [[ARGMEM]], align 8
+; NODL-NEXT:    call void @test9_aux(ptr nonnull inalloca(<{ [[STRUCT_TYPE]] }>) [[ARGMEM]])
+; NODL-NEXT:    ret void
 ;
 entry:
   %inalloca.save = call ptr @llvm.stacksave()

diff  --git a/llvm/test/Transforms/InstCombine/apint-shift.ll b/llvm/test/Transforms/InstCombine/apint-shift.ll
index cbe8ed993b40026..2d862ff6debd156 100644
--- a/llvm/test/Transforms/InstCombine/apint-shift.ll
+++ b/llvm/test/Transforms/InstCombine/apint-shift.ll
@@ -565,7 +565,7 @@ define i40 @test26(i40 %A) {
 define i177 @ossfuzz_9880(i177 %X) {
 ; CHECK-LABEL: @ossfuzz_9880(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i177, align 8
-; CHECK-NEXT:    [[L1:%.*]] = load i177, ptr [[A]], align 8
+; CHECK-NEXT:    [[L1:%.*]] = load i177, ptr [[A]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i177 [[L1]], -1
 ; CHECK-NEXT:    [[B5_NEG:%.*]] = sext i1 [[TMP1]] to i177
 ; CHECK-NEXT:    [[B14:%.*]] = add i177 [[L1]], [[B5_NEG]]

diff  --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll
index 80da69b2da3ed8a..798707f317d2999 100644
--- a/llvm/test/Transforms/InstCombine/assume-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-align.ll
@@ -56,10 +56,10 @@ define void @f2(ptr %a) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
 ; CHECK-NEXT:    br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    store i64 16, ptr [[TMP0]], align 8
+; CHECK-NEXT:    store i64 16, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    br label [[IF_END:%.*]]
 ; CHECK:       if.else:
-; CHECK-NEXT:    store i8 1, ptr [[TMP0]], align 8
+; CHECK-NEXT:    store i8 1, ptr [[TMP0]], align 1
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/InstCombine/assume-loop-align.ll b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
index 24007bacd31ad39..e7eb18c61b6bb0e 100644
--- a/llvm/test/Transforms/InstCombine/assume-loop-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
@@ -22,10 +22,10 @@ define void @foo(ptr %a, ptr %b) #0 {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 64
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
 ; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX5]], align 64
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX5]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 1648

diff  --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index ee6d86610274bf7..52ce90e0515a500 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -7,12 +7,11 @@ target triple = "x86_64-unknown-linux-gnu"
 
 declare void @llvm.assume(i1) #1
 
-; Check that the alignment has been upgraded and that the assume has not
-; been removed:
+; Check that the assume has not been removed:
 
 define i32 @foo1(ptr %a) #0 {
 ; DEFAULT-LABEL: @foo1(
-; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 32
+; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
 ; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
 ; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
@@ -20,7 +19,7 @@ define i32 @foo1(ptr %a) #0 {
 ; DEFAULT-NEXT:    ret i32 [[T0]]
 ;
 ; BUNDLES-LABEL: @foo1(
-; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 32
+; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
 ; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
 ; BUNDLES-NEXT:    ret i32 [[T0]]
 ;
@@ -40,12 +39,12 @@ define i32 @foo2(ptr %a) #0 {
 ; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
-; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 32
+; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 4
 ; DEFAULT-NEXT:    ret i32 [[T0]]
 ;
 ; BUNDLES-LABEL: @foo2(
 ; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A:%.*]], i64 32) ]
-; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 32
+; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 4
 ; BUNDLES-NEXT:    ret i32 [[T0]]
 ;
   %ptrint = ptrtoint ptr %a to i64
@@ -266,7 +265,7 @@ define i32 @bundle2(ptr %P) {
 
 define i1 @nonnull1(ptr %a) {
 ; CHECK-LABEL: @nonnull1(
-; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull [[META6:![0-9]+]], !noundef [[META6]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull !6, !noundef !6
 ; CHECK-NEXT:    tail call void @escape(ptr nonnull [[LOAD]])
 ; CHECK-NEXT:    ret i1 false
 ;

diff  --git a/llvm/test/Transforms/InstCombine/assume_inevitable.ll b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
index b86b84dba5232fe..2643c9b525cb587 100644
--- a/llvm/test/Transforms/InstCombine/assume_inevitable.ll
+++ b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
@@ -3,15 +3,14 @@
 
 ; Check that assume is propagated backwards through all
 ; operations that are `isGuaranteedToTransferExecutionToSuccessor`
-; (it should reach the load and mark it as `align 32`).
 define i32 @assume_inevitable(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @assume_inevitable(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[M:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A:%.*]], align 4
 ; CHECK-NEXT:    [[LOADRES:%.*]] = load i32, ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    [[LOADRES2:%.*]] = call i32 @llvm.annotation.i32.p0(i32 [[LOADRES]], ptr nonnull @.str, ptr nonnull @.str1, i32 2)
-; CHECK-NEXT:    store i32 [[LOADRES2]], ptr [[A]], align 32
+; CHECK-NEXT:    store i32 [[LOADRES2]], ptr [[A]], align 4
 ; CHECK-NEXT:    [[DUMMY_EQ:%.*]] = icmp ugt i32 [[LOADRES]], 42
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[DUMMY_EQ]])
 ; CHECK-NEXT:    [[M_A:%.*]] = call ptr @llvm.ptr.annotation.p0.p0(ptr nonnull [[M]], ptr nonnull @.str, ptr nonnull @.str1, i32 2, ptr null)

diff  --git a/llvm/test/Transforms/InstCombine/atomic.ll b/llvm/test/Transforms/InstCombine/atomic.ll
index e4a3d0f8e1a7776..75ee53982dfe184 100644
--- a/llvm/test/Transforms/InstCombine/atomic.ll
+++ b/llvm/test/Transforms/InstCombine/atomic.ll
@@ -128,7 +128,7 @@ define i32 @test9() {
 
 define i32 @test9_no_null_opt() #0 {
 ; CHECK-LABEL: @test9_no_null_opt(
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null unordered, align 4294967296
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null unordered, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load atomic i32, ptr null unordered, align 4
@@ -138,7 +138,7 @@ define i32 @test9_no_null_opt() #0 {
 ; FIXME: Could also fold
 define i32 @test10() {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null monotonic, align 4294967296
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null monotonic, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load atomic i32, ptr null monotonic, align 4
@@ -147,7 +147,7 @@ define i32 @test10() {
 
 define i32 @test10_no_null_opt() #0 {
 ; CHECK-LABEL: @test10_no_null_opt(
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null monotonic, align 4294967296
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null monotonic, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load atomic i32, ptr null monotonic, align 4
@@ -157,7 +157,7 @@ define i32 @test10_no_null_opt() #0 {
 ; Would this be legal to fold?  Probably?
 define i32 @test11() {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null seq_cst, align 4294967296
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null seq_cst, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load atomic i32, ptr null seq_cst, align 4
@@ -166,7 +166,7 @@ define i32 @test11() {
 
 define i32 @test11_no_null_opt() #0 {
 ; CHECK-LABEL: @test11_no_null_opt(
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null seq_cst, align 4294967296
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr null seq_cst, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load atomic i32, ptr null seq_cst, align 4
@@ -177,7 +177,7 @@ define i32 @test11_no_null_opt() #0 {
 ; ordering imposed.
 define i32 @test12() {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    store atomic i32 poison, ptr null unordered, align 4294967296
+; CHECK-NEXT:    store atomic i32 poison, ptr null unordered, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
   store atomic i32 0, ptr null unordered, align 4
@@ -186,7 +186,7 @@ define i32 @test12() {
 
 define i32 @test12_no_null_opt() #0 {
 ; CHECK-LABEL: @test12_no_null_opt(
-; CHECK-NEXT:    store atomic i32 0, ptr null unordered, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null unordered, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
   store atomic i32 0, ptr null unordered, align 4
@@ -196,7 +196,7 @@ define i32 @test12_no_null_opt() #0 {
 ; FIXME: Could also fold
 define i32 @test13() {
 ; CHECK-LABEL: @test13(
-; CHECK-NEXT:    store atomic i32 0, ptr null monotonic, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null monotonic, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
   store atomic i32 0, ptr null monotonic, align 4
@@ -205,7 +205,7 @@ define i32 @test13() {
 
 define i32 @test13_no_null_opt() #0 {
 ; CHECK-LABEL: @test13_no_null_opt(
-; CHECK-NEXT:    store atomic i32 0, ptr null monotonic, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null monotonic, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
   store atomic i32 0, ptr null monotonic, align 4
@@ -215,7 +215,7 @@ define i32 @test13_no_null_opt() #0 {
 ; Would this be legal to fold?  Probably?
 define i32 @test14() {
 ; CHECK-LABEL: @test14(
-; CHECK-NEXT:    store atomic i32 0, ptr null seq_cst, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null seq_cst, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
   store atomic i32 0, ptr null seq_cst, align 4
@@ -224,7 +224,7 @@ define i32 @test14() {
 
 define i32 @test14_no_null_opt() #0 {
 ; CHECK-LABEL: @test14_no_null_opt(
-; CHECK-NEXT:    store atomic i32 0, ptr null seq_cst, align 4294967296
+; CHECK-NEXT:    store atomic i32 0, ptr null seq_cst, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
   store atomic i32 0, ptr null seq_cst, align 4

diff  --git a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
index e59f7529c6722f0..30d5cd66066bb7f 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
@@ -185,7 +185,7 @@ define i32 @constant_fold_bitcast_itof_load() {
 
 define <4 x float> @constant_fold_bitcast_vector_as() {
 ; CHECK-LABEL: @constant_fold_bitcast_vector_as(
-; CHECK-NEXT:    [[A:%.*]] = load <4 x float>, ptr addrspace(3) @g_v4f_as3, align 16
+; CHECK-NEXT:    [[A:%.*]] = load <4 x float>, ptr addrspace(3) @g_v4f_as3, align 4
 ; CHECK-NEXT:    ret <4 x float> [[A]]
 ;
   %a = load <4 x float>, ptr addrspace(3) @g_v4f_as3, align 4
@@ -196,7 +196,7 @@ define <4 x float> @constant_fold_bitcast_vector_as() {
 
 define i32 @test_cast_gep_small_indices_as() {
 ; CHECK-LABEL: @test_cast_gep_small_indices_as(
-; CHECK-NEXT:    [[X:%.*]] = load i32, ptr addrspace(3) @i32_array_as3, align 16
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr addrspace(3) @i32_array_as3, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load i32, ptr addrspace(3) @i32_array_as3, align 4
@@ -214,7 +214,7 @@ define i32 @test_cast_gep_small_indices_as() {
 
 define i32 @test_cast_gep_large_indices_as() {
 ; CHECK-LABEL: @test_cast_gep_large_indices_as(
-; CHECK-NEXT:    [[X:%.*]] = load i32, ptr addrspace(3) @i32_array_as3, align 16
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr addrspace(3) @i32_array_as3, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = load i32, ptr addrspace(3) @i32_array_as3, align 4
@@ -223,7 +223,7 @@ define i32 @test_cast_gep_large_indices_as() {
 
 define i32 @test_constant_cast_gep_struct_indices_as() {
 ; CHECK-LABEL: @test_constant_cast_gep_struct_indices_as(
-; CHECK-NEXT:    [[Y:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[STRUCT_FOO:%.*]], ptr addrspace(3) @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 16
+; CHECK-NEXT:    [[Y:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[STRUCT_FOO:%.*]], ptr addrspace(3) @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 4
 ; CHECK-NEXT:    ret i32 [[Y]]
 ;
   %x = getelementptr %struct.foo, ptr addrspace(3) @constant_fold_global_ptr, i18 0, i32 2, i12 2

diff  --git a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
index 80a18c0abfadc44..009c19dfa66cf93 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
@@ -11,26 +11,26 @@ target datalayout = "E-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
 
 define void @frob() {
 ; CHECK-LABEL: @frob(
-; CHECK-NEXT:    store i32 1, ptr @Y, align 16
+; CHECK-NEXT:    store i32 1, ptr @Y, align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 1), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 2), align 8
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 2), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 0), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 1), align 16
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 1), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 2), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 0), align 8
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 0), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 1), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 2), align 16
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 2), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 0), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 1), align 8
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 1), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 2), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 0), align 16
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 0), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 1), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 2), align 8
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 0), align 4
-; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 1), align 16
+; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 1), align 8
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 2), align 4
 ; CHECK-NEXT:    store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 1, i64 0, i32 0, i64 0), align 8
-; CHECK-NEXT:    store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 2, i64 0, i32 0, i64 0), align 16
+; CHECK-NEXT:    store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 2, i64 0, i32 0, i64 0), align 8
 ; CHECK-NEXT:    store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 1, i64 0, i32 0, i64 1), align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -97,25 +97,6 @@ entry:
   ret i16 %E
 }
 
-; Check that we improve the alignment information.
-; The base pointer is 16-byte aligned and we access the field at
-; an offset of 8-byte.
-; Every element in the @CallerInfos array is 16-byte aligned so
-; any access from the following gep is 8-byte aligned.
-%struct.CallerInfo = type { ptr, i32 }
- at CallerInfos = global [128 x %struct.CallerInfo] zeroinitializer, align 16
-
-define i32 @test_gep_in_struct(i64 %idx) {
-; CHECK-LABEL: @test_gep_in_struct(
-; CHECK-NEXT:    [[NS7:%.*]] = getelementptr inbounds [128 x %struct.CallerInfo], ptr @CallerInfos, i64 0, i64 [[IDX:%.*]], i32 1
-; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[NS7]], align 8
-; CHECK-NEXT:    ret i32 [[RES]]
-;
-  %NS7 = getelementptr inbounds [128 x %struct.CallerInfo], ptr @CallerInfos, i64 0, i64 %idx, i32 1
-  %res = load i32, ptr %NS7, align 1
-  ret i32 %res
-}
-
 @g = external global i8
 @g2 = external global i8
 

diff  --git a/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll b/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
index 5307ebe3993794d..a8a7ee4608f65e4 100644
--- a/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
+++ b/llvm/test/Transforms/InstCombine/dbg-scalable-store-fixed-frag.ll
@@ -4,10 +4,10 @@
 define i32 @foo(<vscale x 2 x i32> %x) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARR:%.*]] = alloca i32, align 8
+; CHECK-NEXT:    [[ARR:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    call void @llvm.dbg.value(metadata <vscale x 2 x i32> undef, metadata [[META8:![0-9]+]], metadata !DIExpression()), !dbg [[DBG14:![0-9]+]]
-; CHECK-NEXT:    store <vscale x 2 x i32> [[X:%.*]], ptr [[ARR]], align 8
-; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[ARR]], align 8
+; CHECK-NEXT:    store <vscale x 2 x i32> [[X:%.*]], ptr [[ARR]], align 4
+; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[ARR]], align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
 entry:
@@ -21,10 +21,10 @@ entry:
 define i32 @foo2(<vscale x 2 x i32> %x) {
 ; CHECK-LABEL: @foo2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 8
+; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
 ; CHECK-NEXT:    call void @llvm.dbg.declare(metadata ptr [[ARR]], metadata [[META15:![0-9]+]], metadata !DIExpression()), !dbg [[DBG17:![0-9]+]]
-; CHECK-NEXT:    store <vscale x 2 x i32> [[X:%.*]], ptr [[ARR]], align 8
-; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[ARR]], align 8
+; CHECK-NEXT:    store <vscale x 2 x i32> [[X:%.*]], ptr [[ARR]], align 4
+; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[ARR]], align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
 entry:

diff  --git a/llvm/test/Transforms/InstCombine/fcmp-denormals-are-zero.ll b/llvm/test/Transforms/InstCombine/fcmp-denormals-are-zero.ll
index 216d06c7be81e04..22c422c81603955 100644
--- a/llvm/test/Transforms/InstCombine/fcmp-denormals-are-zero.ll
+++ b/llvm/test/Transforms/InstCombine/fcmp-denormals-are-zero.ll
@@ -8,13 +8,13 @@
 define void @denormal_input_preserve_sign_fcmp_olt_smallest_normalized(float %f32, double %f64, half %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_olt_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq float [[F32:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp oeq double [[F64:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp oeq half [[F16:%.*]], 0xH0000
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF32_FLAGS:%.*]] = fcmp oeq float [[F32]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32_FLAGS]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32_FLAGS]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -41,11 +41,11 @@ define void @denormal_input_preserve_sign_fcmp_olt_smallest_normalized(float %f3
 define void @denormal_input_preserve_sign_fcmp_uge_smallest_normalized(float %f32, double %f64, half %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_uge_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp une float [[F32:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp une double [[F64:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp une half [[F16:%.*]], 0xH0000
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -67,11 +67,11 @@ define void @denormal_input_preserve_sign_fcmp_uge_smallest_normalized(float %f3
 define void @denormal_input_preserve_sign_fcmp_oge_smallest_normalized(float %f32, double %f64, half %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_oge_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp one float [[F32:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp one double [[F64:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp one half [[F16:%.*]], 0xH0000
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -93,11 +93,11 @@ define void @denormal_input_preserve_sign_fcmp_oge_smallest_normalized(float %f3
 define void @denormal_input_preserve_sign_fcmp_ult_smallest_normalized(float %f32, double %f64, half %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_ult_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp ueq float [[F32:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp ueq double [[F64:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp ueq half [[F16:%.*]], 0xH0000
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -117,11 +117,11 @@ define void @denormal_input_preserve_sign_fcmp_ult_smallest_normalized(float %f3
 define void @denormal_input_preserve_sign_vector_fcmp_olt_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_olt_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq <2 x float> [[F32:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp oeq <2 x double> [[F64:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp oeq <2 x half> [[F16:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
@@ -141,11 +141,11 @@ define void @denormal_input_preserve_sign_vector_fcmp_olt_smallest_normalized(<2
 define void @denormal_input_preserve_sign_vector_fcmp_uge_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_uge_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp une <2 x float> [[F32:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp une <2 x double> [[F64:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp une <2 x half> [[F16:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
@@ -165,11 +165,11 @@ define void @denormal_input_preserve_sign_vector_fcmp_uge_smallest_normalized(<2
 define void @denormal_input_preserve_sign_vector_fcmp_oge_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_oge_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp one <2 x float> [[F32:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp one <2 x double> [[F64:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp one <2 x half> [[F16:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
@@ -189,11 +189,11 @@ define void @denormal_input_preserve_sign_vector_fcmp_oge_smallest_normalized(<2
 define void @denormal_input_preserve_sign_vector_fcmp_ult_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_ult_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp ueq <2 x float> [[F32:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp ueq <2 x double> [[F64:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp ueq <2 x half> [[F16:%.*]], zeroinitializer
-; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
@@ -215,11 +215,11 @@ define void @denormal_input_preserve_sign_vector_fcmp_ult_smallest_normalized(<2
 define void @denormal_input_positive_zero_fcmp_olt_smallest_normalized(float %f32, double %f64, half %f16) #1 {
 ; CHECK-LABEL: @denormal_input_positive_zero_fcmp_olt_smallest_normalized(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq float [[F32:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp oeq double [[F64:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp oeq half [[F16:%.*]], 0xH0000
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -241,13 +241,13 @@ define void @denormal_input_ieee(float %f32, double %f64, half %f16) #2 {
 ; CHECK-LABEL: @denormal_input_ieee(
 ; CHECK-NEXT:    [[F32_FABS:%.*]] = call float @llvm.fabs.f32(float [[F32:%.*]])
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp olt float [[F32_FABS]], 0x3810000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[F64_FABS:%.*]] = call double @llvm.fabs.f64(double [[F64:%.*]])
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp olt double [[F64_FABS]], 0x10000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[F16_FABS:%.*]] = call half @llvm.fabs.f16(half [[F16:%.*]])
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp olt half [[F16_FABS]], 0xH0400
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -268,13 +268,13 @@ define void @denormal_input_ieee(float %f32, double %f64, half %f16) #2 {
 define void @denormal_input_preserve_sign_f32_only(float %f32, double %f64, half %f16) #3 {
 ; CHECK-LABEL: @denormal_input_preserve_sign_f32_only(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq float [[F32:%.*]], 0.000000e+00
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[F64_FABS:%.*]] = call double @llvm.fabs.f64(double [[F64:%.*]])
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp olt double [[F64_FABS]], 0x10000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[F16_FABS:%.*]] = call half @llvm.fabs.f16(half [[F16:%.*]])
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp olt half [[F16_FABS]], 0xH0400
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -295,13 +295,13 @@ define void @wrong_fcmp_type_ole(float %f32, double %f64, half %f16) #0 {
 ; CHECK-LABEL: @wrong_fcmp_type_ole(
 ; CHECK-NEXT:    [[F32_FABS:%.*]] = call float @llvm.fabs.f32(float [[F32:%.*]])
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp ole float [[F32_FABS]], 0x3810000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[F64_FABS:%.*]] = call double @llvm.fabs.f64(double [[F64:%.*]])
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp ole double [[F64_FABS]], 0x10000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[F16_FABS:%.*]] = call half @llvm.fabs.f16(half [[F16:%.*]])
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp ole half [[F16_FABS]], 0xH0400
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %f32.fabs = call float @llvm.fabs.f32(float %f32)
@@ -321,11 +321,11 @@ define void @wrong_fcmp_type_ole(float %f32, double %f64, half %f16) #0 {
 define void @missing_fabs(float %f32, double %f64, half %f16) #0 {
 ; CHECK-LABEL: @missing_fabs(
 ; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp olt float [[F32:%.*]], 0x3810000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp olt double [[F64:%.*]], 0x10000000000000
-; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
 ; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp olt half [[F16:%.*]], 0xH0400
-; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 4
+; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
 ; CHECK-NEXT:    ret void
 ;
   %cmpf32 = fcmp olt float %f32, 0x3810000000000000

diff  --git a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
index f4be83ace0c0e78..15eb3e15ea44ae8 100644
--- a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
@@ -16,8 +16,8 @@ define void @bork() nounwind  {
 ; CHECK-LABEL: @bork(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[COLOR:%.*]] = alloca ptr, align 8
-; CHECK-NEXT:    [[TMP103:%.*]] = load ptr, ptr [[COLOR]], align 8
-; CHECK-NEXT:    [[TMP105:%.*]] = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_81", align 8
+; CHECK-NEXT:    [[TMP103:%.*]] = load ptr, ptr [[COLOR]], align 4
+; CHECK-NEXT:    [[TMP105:%.*]] = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
 ; CHECK-NEXT:    [[TMP107:%.*]] = call float @objc_msgSend_fpret(ptr [[TMP103]], ptr [[TMP105]]) #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:

diff  --git a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
index e80f1b242a52cce..41285c78f03edbe 100644
--- a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
@@ -62,7 +62,7 @@ define void @test_evaluate_gep_nested_as_ptrs(ptr addrspace(2) %B) {
 
 define void @test_evaluate_gep_as_ptrs_array(ptr addrspace(2) %B) {
 ; CHECK-LABEL: @test_evaluate_gep_as_ptrs_array(
-; CHECK-NEXT:    store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) getelementptr inbounds ([4 x ptr addrspace(2)], ptr addrspace(1) @arst, i32 0, i32 2), align 16
+; CHECK-NEXT:    store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) getelementptr inbounds ([4 x ptr addrspace(2)], ptr addrspace(1) @arst, i32 0, i32 2), align 8
 ; CHECK-NEXT:    ret void
 ;
 

diff  --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 9046cb6b4529eef..fb6147b68897519 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -91,7 +91,7 @@ define void @test5_as1(i8 %B) {
 ; This should be turned into a constexpr instead of being an instruction
 define void @test_evaluate_gep_nested_as_ptrs(ptr addrspace(2) %B) {
 ; CHECK-LABEL: @test_evaluate_gep_nested_as_ptrs(
-; CHECK-NEXT:    store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) @global_as1_as2_ptr, align 8
+; CHECK-NEXT:    store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) @global_as1_as2_ptr, align 4
 ; CHECK-NEXT:    ret void
 ;
   store ptr addrspace(2) %B, ptr addrspace(1) @global_as1_as2_ptr
@@ -458,7 +458,7 @@ define i32 @test20_as1(ptr addrspace(1) %P, i32 %A, i32 %B) {
 define i32 @test21() {
 ; CHECK-LABEL: @test21(
 ; CHECK-NEXT:    [[PBOB1:%.*]] = alloca [[INTSTRUCT:%.*]], align 8
-; CHECK-NEXT:    [[RVAL:%.*]] = load i32, ptr [[PBOB1]], align 8
+; CHECK-NEXT:    [[RVAL:%.*]] = load i32, ptr [[PBOB1]], align 4
 ; CHECK-NEXT:    ret i32 [[RVAL]]
 ;
   %pbob1 = alloca %intstruct
@@ -668,11 +668,11 @@ define i1 @test31(ptr %A) {
 define ptr @test32(ptr %v) {
 ; CHECK-LABEL: @test32(
 ; CHECK-NEXT:    [[A:%.*]] = alloca [4 x ptr], align 16
-; CHECK-NEXT:    store ptr null, ptr [[A]], align 16
+; CHECK-NEXT:    store ptr null, ptr [[A]], align 8
 ; CHECK-NEXT:    [[D:%.*]] = getelementptr inbounds { [16 x i8] }, ptr [[A]], i64 0, i32 0, i64 8
 ; CHECK-NEXT:    store ptr [[V:%.*]], ptr [[D]], align 8
 ; CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [4 x ptr], ptr [[A]], i64 0, i64 2
-; CHECK-NEXT:    [[G:%.*]] = load ptr, ptr [[F]], align 16
+; CHECK-NEXT:    [[G:%.*]] = load ptr, ptr [[F]], align 8
 ; CHECK-NEXT:    ret ptr [[G]]
 ;
   %A = alloca [4 x ptr], align 16

diff  --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index 7fb6e7a3a37b388..56f6e042b3cafdb 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -216,7 +216,7 @@ define i1 @test10_struct(i32 %x) {
 define i1 @test10_struct_noinbounds(i32 %x) {
 ; CHECK-LABEL: @test10_struct_noinbounds(
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr [[FOO:%.*]], ptr @GS, i32 [[X:%.*]], i32 0
-; CHECK-NEXT:    [[Q:%.*]] = load i32, ptr [[P]], align 8
+; CHECK-NEXT:    [[Q:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[Q]], 9
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -254,7 +254,7 @@ define i1 @test10_struct_noinbounds_i16(i16 %x) {
 ; CHECK-LABEL: @test10_struct_noinbounds_i16(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr [[FOO:%.*]], ptr @GS, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[Q:%.*]] = load i32, ptr [[P]], align 8
+; CHECK-NEXT:    [[Q:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[Q]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/load-combine-metadata-dominance.ll b/llvm/test/Transforms/InstCombine/load-combine-metadata-dominance.ll
index 07a15d01cf43c3f..13dfc4a59877a2e 100644
--- a/llvm/test/Transforms/InstCombine/load-combine-metadata-dominance.ll
+++ b/llvm/test/Transforms/InstCombine/load-combine-metadata-dominance.ll
@@ -125,7 +125,7 @@ define void @combine_metadata_dominance6(ptr %p) {
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    [[A:%.*]] = load ptr, ptr [[P]], align 8, !align !2, !noundef !0
-; CHECK-NEXT:    store i32 0, ptr [[A]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[A]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:

diff  --git a/llvm/test/Transforms/InstCombine/load.ll b/llvm/test/Transforms/InstCombine/load.ll
index 0da1918018f867f..fb6c50d5fd8fcad 100644
--- a/llvm/test/Transforms/InstCombine/load.ll
+++ b/llvm/test/Transforms/InstCombine/load.ll
@@ -175,9 +175,9 @@ define <16 x i8> @test13(<2 x i64> %x) {
 define i8 @test14(i8 %x, i32 %y) {
 ; CHECK-LABEL: @test14(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i8 [[X:%.*]], ptr [[A]], align 4
+; CHECK-NEXT:    store i8 [[X:%.*]], ptr [[A]], align 1
 ; CHECK-NEXT:    store i32 [[Y:%.*]], ptr [[A]], align 4
-; CHECK-NEXT:    [[R:%.*]] = load i8, ptr [[A]], align 4
+; CHECK-NEXT:    [[R:%.*]] = load i8, ptr [[A]], align 1
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a = alloca i32
@@ -193,9 +193,9 @@ define i8 @test14(i8 %x, i32 %y) {
 
 define i8 @test15(i8 %x, i32 %y) {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    store i8 [[X:%.*]], ptr @test15_global, align 4
+; CHECK-NEXT:    store i8 [[X:%.*]], ptr @test15_global, align 1
 ; CHECK-NEXT:    store i32 [[Y:%.*]], ptr @test15_global, align 4
-; CHECK-NEXT:    [[R:%.*]] = load i8, ptr @test15_global, align 4
+; CHECK-NEXT:    [[R:%.*]] = load i8, ptr @test15_global, align 1
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   store i8 %x, ptr @test15_global
@@ -420,7 +420,7 @@ define i32 @load_via_strip_invariant_group() {
 
 define i4 @test_vector_load_i4_non_byte_sized() {
 ; CHECK-LABEL: @test_vector_load_i4_non_byte_sized(
-; CHECK-NEXT:    [[RES0:%.*]] = load i4, ptr @foo, align 8
+; CHECK-NEXT:    [[RES0:%.*]] = load i4, ptr @foo, align 1
 ; CHECK-NEXT:    ret i4 [[RES0]]
 ;
   %ptr0 = getelementptr i8, ptr @foo, i64 0

diff  --git a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
index c71135fb996370c..0fc82a1d5343699 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -9,7 +9,7 @@ target datalayout = "E-p:64:64:64-p1:64:64:64-p2:32:32:32-a0:0:8-f32:32:32-f64:6
 
 define <2 x i64> @static_hem() {
 ; CHECK-LABEL: @static_hem(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 1
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
   %t = getelementptr <2 x i64>, ptr @x, i32 7
@@ -21,7 +21,7 @@ define <2 x i64> @hem(i32 %i) {
 ; CHECK-LABEL: @hem(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
 ; CHECK-NEXT:    [[T:%.*]] = getelementptr <2 x i64>, ptr @x, i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 1
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
   %t = getelementptr <2 x i64>, ptr @x, i32 %i
@@ -34,7 +34,7 @@ define <2 x i64> @hem_2d(i32 %i, i32 %j) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
 ; CHECK-NEXT:    [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 1
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
   %t = getelementptr [13 x <2 x i64>], ptr @xx, i32 %i, i32 %j
@@ -44,7 +44,7 @@ define <2 x i64> @hem_2d(i32 %i, i32 %j) {
 
 define <2 x i64> @foo() {
 ; CHECK-LABEL: @foo(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @x, align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @x, align 1
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
   %tmp1 = load <2 x i64>, ptr @x, align 1
@@ -55,7 +55,7 @@ define <2 x i64> @bar() {
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:    [[T:%.*]] = alloca <2 x i64>, align 16
 ; CHECK-NEXT:    call void @kip(ptr nonnull [[T]])
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[T]], align 1
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
   %t = alloca <2 x i64>
@@ -66,7 +66,7 @@ define <2 x i64> @bar() {
 
 define void @static_hem_store(<2 x i64> %y) {
 ; CHECK-LABEL: @static_hem_store(
-; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 16
+; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 1
 ; CHECK-NEXT:    ret void
 ;
   %t = getelementptr <2 x i64>, ptr @x, i32 7
@@ -78,7 +78,7 @@ define void @hem_store(i32 %i, <2 x i64> %y) {
 ; CHECK-LABEL: @hem_store(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
 ; CHECK-NEXT:    [[T:%.*]] = getelementptr <2 x i64>, ptr @x, i64 [[TMP1]]
-; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr [[T]], align 16
+; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr [[T]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %t = getelementptr <2 x i64>, ptr @x, i32 %i
@@ -91,7 +91,7 @@ define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[J:%.*]] to i64
 ; CHECK-NEXT:    [[T:%.*]] = getelementptr [13 x <2 x i64>], ptr @xx, i64 [[TMP1]], i64 [[TMP2]]
-; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr [[T]], align 16
+; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr [[T]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %t = getelementptr [13 x <2 x i64>], ptr @xx, i32 %i, i32 %j
@@ -101,7 +101,7 @@ define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
 
 define void @foo_store(<2 x i64> %y) {
 ; CHECK-LABEL: @foo_store(
-; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr @x, align 16
+; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr @x, align 1
 ; CHECK-NEXT:    ret void
 ;
   store <2 x i64> %y, ptr @x, align 1
@@ -112,7 +112,7 @@ define void @bar_store(<2 x i64> %y) {
 ; CHECK-LABEL: @bar_store(
 ; CHECK-NEXT:    [[T:%.*]] = alloca <2 x i64>, align 16
 ; CHECK-NEXT:    call void @kip(ptr nonnull [[T]])
-; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr [[T]], align 16
+; CHECK-NEXT:    store <2 x i64> [[Y:%.*]], ptr [[T]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %t = alloca <2 x i64>

diff  --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index 349673974b8faae..ea9b16e1382ee90 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -10,7 +10,7 @@ define float @test1(i32 %hash, float %x, float %y, float %z, float %w) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], 124
 ; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[TMP5]] to i64
 ; CHECK-NEXT:    [[TMP753:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP0]]
-; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP753]], align 16
+; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP753]], align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = fmul float [[TMP9]], [[X:%.*]]
 ; CHECK-NEXT:    [[TMP13:%.*]] = fadd float [[TMP11]], 0.000000e+00
 ; CHECK-NEXT:    [[TMP17_SUM52:%.*]] = or i32 [[TMP5]], 1
@@ -22,7 +22,7 @@ define float @test1(i32 %hash, float %x, float %y, float %z, float %w) {
 ; CHECK-NEXT:    [[TMP27_SUM50:%.*]] = or i32 [[TMP5]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP27_SUM50]] to i64
 ; CHECK-NEXT:    [[TMP2849:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP29:%.*]] = load float, ptr [[TMP2849]], align 8
+; CHECK-NEXT:    [[TMP29:%.*]] = load float, ptr [[TMP2849]], align 4
 ; CHECK-NEXT:    [[TMP31:%.*]] = fmul float [[TMP29]], [[Z:%.*]]
 ; CHECK-NEXT:    [[TMP33:%.*]] = fadd float [[TMP31]], [[TMP23]]
 ; CHECK-NEXT:    [[TMP37_SUM48:%.*]] = or i32 [[TMP5]], 3

diff  --git a/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll b/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
index e60d80cdf2da770..b35fceef372c663 100644
--- a/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
+++ b/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
@@ -30,12 +30,11 @@ define void @_Z4testv() {
 ; CHECK-NEXT:    [[I11:%.*]] = trunc i64 [[I7]] to i32
 ; CHECK-NEXT:    br label [[BB12]]
 ; CHECK:       bb12:
-; CHECK-NEXT:    [[STOREMERGE1:%.*]] = phi i32 [ [[I11]], [[BB10]] ], [ 1, [[BB9]] ]
 ; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi i32 [ 1, [[BB9]] ], [ [[I11]], [[BB10]] ]
-; CHECK-NEXT:    store i32 [[STOREMERGE1]], ptr @arr_2, align 4
+; CHECK-NEXT:    store i32 [[STOREMERGE]], ptr @arr_2, align 4
 ; CHECK-NEXT:    store i16 [[I4]], ptr @arr_4, align 2
 ; CHECK-NEXT:    [[I8:%.*]] = sext i16 [[I4]] to i32
-; CHECK-NEXT:    store i32 [[I8]], ptr @arr_3, align 16
+; CHECK-NEXT:    store i32 [[I8]], ptr @arr_3, align 4
 ; CHECK-NEXT:    store i32 [[STOREMERGE]], ptr getelementptr inbounds ([0 x i32], ptr @arr_2, i64 0, i64 1), align 4
 ; CHECK-NEXT:    store i16 [[I4]], ptr getelementptr inbounds ([0 x i16], ptr @arr_4, i64 0, i64 1), align 2
 ; CHECK-NEXT:    store i32 [[I8]], ptr getelementptr inbounds ([8 x i32], ptr @arr_3, i64 0, i64 1), align 4
@@ -109,13 +108,13 @@ define i32 @diff_types_diff_width_no_merge(i1 %cond, i32 %a, i64 %b) {
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       A:
-; CHECK-NEXT:    store i32 [[A:%.*]], ptr [[ALLOCA]], align 8
+; CHECK-NEXT:    store i32 [[A:%.*]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    br label [[SINK:%.*]]
 ; CHECK:       B:
-; CHECK-NEXT:    store i64 [[B:%.*]], ptr [[ALLOCA]], align 8
+; CHECK-NEXT:    store i64 [[B:%.*]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    br label [[SINK]]
 ; CHECK:       sink:
-; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ALLOCA]], align 8
+; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    ret i32 [[VAL]]
 ;
 entry:
@@ -135,10 +134,10 @@ sink:
 define <4 x i32> @vec_no_merge(i1 %cond, <2 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: @vec_no_merge(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 16
+; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       A:
-; CHECK-NEXT:    store <2 x i32> [[A:%.*]], ptr [[ALLOCA]], align 16
+; CHECK-NEXT:    store <2 x i32> [[A:%.*]], ptr [[ALLOCA]], align 8
 ; CHECK-NEXT:    br label [[SINK:%.*]]
 ; CHECK:       B:
 ; CHECK-NEXT:    store <4 x i32> [[B:%.*]], ptr [[ALLOCA]], align 16
@@ -199,13 +198,13 @@ define %struct.tup @multi_elem_struct_no_merge(i1 %cond, %struct.tup %a, half %b
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       A:
-; CHECK-NEXT:    store [[STRUCT_TUP:%.*]] [[A:%.*]], ptr [[ALLOCA]], align 8
+; CHECK-NEXT:    store [[STRUCT_TUP:%.*]] [[A:%.*]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    br label [[SINK:%.*]]
 ; CHECK:       B:
-; CHECK-NEXT:    store half [[B:%.*]], ptr [[ALLOCA]], align 8
+; CHECK-NEXT:    store half [[B:%.*]], ptr [[ALLOCA]], align 2
 ; CHECK-NEXT:    br label [[SINK]]
 ; CHECK:       sink:
-; CHECK-NEXT:    [[VAL:%.*]] = load [[STRUCT_TUP]], ptr [[ALLOCA]], align 8
+; CHECK-NEXT:    [[VAL:%.*]] = load [[STRUCT_TUP]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    ret [[STRUCT_TUP]] [[VAL]]
 ;
 entry:
@@ -234,7 +233,7 @@ define i16 @same_types_diff_align_no_merge(i1 %cond, i16 %a, i16 %b) {
 ; CHECK-NEXT:    store i16 [[B:%.*]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    br label [[SINK]]
 ; CHECK:       sink:
-; CHECK-NEXT:    [[VAL:%.*]] = load i16, ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    [[VAL:%.*]] = load i16, ptr [[ALLOCA]], align 2
 ; CHECK-NEXT:    ret i16 [[VAL]]
 ;
 entry:
@@ -254,15 +253,17 @@ sink:
 define i64 @ptrtoint_merge(i1 %cond, i64 %a, ptr %b) {
 ; CHECK-LABEL: @ptrtoint_merge(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca ptr, align 8
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[BB0:%.*]], label [[BB1:%.*]]
 ; CHECK:       BB0:
+; CHECK-NEXT:    store i64 [[A:%.*]], ptr [[ALLOCA]], align 4
 ; CHECK-NEXT:    br label [[SINK:%.*]]
 ; CHECK:       BB1:
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B:%.*]] to i64
+; CHECK-NEXT:    store ptr [[B:%.*]], ptr [[ALLOCA]], align 8
 ; CHECK-NEXT:    br label [[SINK]]
 ; CHECK:       sink:
-; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi i64 [ [[A:%.*]], [[BB0]] ], [ [[TMP0]], [[BB1]] ]
-; CHECK-NEXT:    ret i64 [[STOREMERGE]]
+; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[ALLOCA]], align 4
+; CHECK-NEXT:    ret i64 [[VAL]]
 ;
 entry:
   %alloca = alloca ptr

diff  --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 81bed68139c3f3c..bb8e37b4cfb008d 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -508,8 +508,8 @@ define i32 @test16(ptr addrspace(1) %pointer1, i32 %flag, ptr %pointer2)
 ; CHECK-LABEL: @test16(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[POINTER1_ADDR:%.*]] = alloca ptr addrspace(1), align 8
-; CHECK-NEXT:    [[POINTER2_ADDR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[POINTER1_ADDR:%.*]] = alloca ptr addrspace(1), align 4
+; CHECK-NEXT:    [[POINTER2_ADDR:%.*]] = alloca ptr, align 4
 ; CHECK-NEXT:    store ptr addrspace(1) [[POINTER1:%.*]], ptr [[POINTER1_ADDR]], align 8
 ; CHECK-NEXT:    store ptr [[POINTER2:%.*]], ptr [[POINTER2_ADDR]], align 8
 ; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[FLAG:%.*]], 0

diff  --git a/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll b/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
index 465c1db0e0ce150..4c569993370b480 100644
--- a/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
+++ b/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
@@ -20,9 +20,9 @@ define void @f(i1 %cond) {
 ; CHECK:       bb2:
 ; CHECK-NEXT:    [[T9:%.*]] = load ptr, ptr @b, align 2
 ; CHECK-NEXT:    store i16 0, ptr [[T9]], align 2
-; CHECK-NEXT:    [[T10:%.*]] = load i32, ptr [[T1]], align 8
+; CHECK-NEXT:    [[T10:%.*]] = load i32, ptr [[T1]], align 4
 ; CHECK-NEXT:    [[T11:%.*]] = add i32 [[T10]], -1
-; CHECK-NEXT:    store i32 [[T11]], ptr [[T1]], align 8
+; CHECK-NEXT:    store i32 [[T11]], ptr [[T1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 bb0:

diff  --git a/llvm/test/Transforms/InstCombine/pr44552.ll b/llvm/test/Transforms/InstCombine/pr44552.ll
index 86899c3d026a61f..5301190d0123d4a 100644
--- a/llvm/test/Transforms/InstCombine/pr44552.ll
+++ b/llvm/test/Transforms/InstCombine/pr44552.ll
@@ -22,7 +22,7 @@
 define i16 @main() {
 ; CHECK-LABEL: @main(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i64 0, ptr @csmith_sink_, align 8
+; CHECK-NEXT:    store i64 0, ptr @csmith_sink_, align 1
 ; CHECK-NEXT:    ret i16 0
 ;
 entry:

diff  --git a/llvm/test/Transforms/InstCombine/pr59613.ll b/llvm/test/Transforms/InstCombine/pr59613.ll
index a669a0d4207e918..2db3bf0decf7061 100644
--- a/llvm/test/Transforms/InstCombine/pr59613.ll
+++ b/llvm/test/Transforms/InstCombine/pr59613.ll
@@ -4,7 +4,7 @@
 ; This used to crash, depending on the particular worklist iteration order.
 define void @pr59613(<6 x i16> %0) {
 ; CHECK-LABEL: @pr59613(
-; CHECK-NEXT:    store <6 x i16> poison, ptr null, align 4294967296
+; CHECK-NEXT:    store <6 x i16> poison, ptr null, align 16
 ; CHECK-NEXT:    ret void
 ;
   %cmp1 = icmp ne <6 x i16> %0, zeroinitializer

diff  --git a/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll b/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
index ef0734b883f8754..71799d18ed312a5 100644
--- a/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
+++ b/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
@@ -21,9 +21,9 @@ entry:
 define void @scalable4i32_to_fixed16i32(ptr %out) {
 ; CHECK-LABEL: @scalable4i32_to_fixed16i32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 16
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 16
 ; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
@@ -55,9 +55,9 @@ entry:
 define void @scalable16i32_to_fixed16i32(ptr %out) {
 ; CHECK-LABEL: @scalable16i32_to_fixed16i32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
-; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 16
+; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 16
 ; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
@@ -72,9 +72,9 @@ entry:
 define void @scalable32i32_to_scalable16i32(ptr %out) {
 ; CHECK-LABEL: @scalable32i32_to_scalable16i32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
-; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 16
+; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
 ; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
@@ -89,9 +89,9 @@ entry:
 define void @scalable32i16_to_scalable16i32(ptr %out) {
 ; CHECK-LABEL: @scalable32i16_to_scalable16i32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
-; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 16
+; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
 ; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
@@ -106,11 +106,11 @@ entry:
 define void @scalable32i16_to_scalable16i32_multiuse(ptr %out, ptr %out2) {
 ; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
-; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 16
+; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
 ; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
-; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, ptr [[TMP]], align 16
 ; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], ptr [[OUT2:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;

diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index a2cdb951d31bd02..82e163d93f0f1ab 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -1212,8 +1212,8 @@ define ptr @test83(i1 %flag) {
 ; CHECK-NEXT:    [[Y:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    call void @scribble_on_i64(ptr nonnull [[X]])
 ; CHECK-NEXT:    call void @scribble_on_i64(ptr nonnull [[Y]])
-; CHECK-NEXT:    [[T:%.*]] = load i64, ptr [[X]], align 8
-; CHECK-NEXT:    store i64 [[T]], ptr [[Y]], align 8
+; CHECK-NEXT:    [[T:%.*]] = load i64, ptr [[X]], align 4
+; CHECK-NEXT:    store i64 [[T]], ptr [[Y]], align 4
 ; CHECK-NEXT:    [[V:%.*]] = inttoptr i64 [[T]] to ptr
 ; CHECK-NEXT:    ret ptr [[V]]
 ;
@@ -1261,8 +1261,8 @@ define ptr @test85(i1 %flag) {
 ; CHECK-NEXT:    [[Y:%.*]] = alloca i128, align 8
 ; CHECK-NEXT:    call void @scribble_on_i128(ptr nonnull [[X]])
 ; CHECK-NEXT:    call void @scribble_on_i128(ptr nonnull [[Y]])
-; CHECK-NEXT:    [[T:%.*]] = load i128, ptr [[X]], align 8
-; CHECK-NEXT:    store i128 [[T]], ptr [[Y]], align 8
+; CHECK-NEXT:    [[T:%.*]] = load i128, ptr [[X]], align 4
+; CHECK-NEXT:    store i128 [[T]], ptr [[Y]], align 4
 ; CHECK-NEXT:    [[X_VAL:%.*]] = load ptr, ptr [[X]], align 8
 ; CHECK-NEXT:    [[Y_VAL:%.*]] = load ptr, ptr [[Y]], align 8
 ; CHECK-NEXT:    [[V:%.*]] = select i1 [[FLAG:%.*]], ptr [[X_VAL]], ptr [[Y_VAL]]
@@ -1290,8 +1290,8 @@ define i128 @test86(i1 %flag) {
 ; CHECK-NEXT:    call void @scribble_on_i128(ptr nonnull [[Y]])
 ; CHECK-NEXT:    [[T:%.*]] = load ptr, ptr [[X]], align 8
 ; CHECK-NEXT:    store ptr [[T]], ptr [[Y]], align 8
-; CHECK-NEXT:    [[X_VAL:%.*]] = load i128, ptr [[X]], align 8
-; CHECK-NEXT:    [[Y_VAL:%.*]] = load i128, ptr [[Y]], align 8
+; CHECK-NEXT:    [[X_VAL:%.*]] = load i128, ptr [[X]], align 4
+; CHECK-NEXT:    [[Y_VAL:%.*]] = load i128, ptr [[Y]], align 4
 ; CHECK-NEXT:    [[V:%.*]] = select i1 [[FLAG:%.*]], i128 [[X_VAL]], i128 [[Y_VAL]]
 ; CHECK-NEXT:    ret i128 [[V]]
 ;

diff --git a/llvm/test/Transforms/InstCombine/store.ll b/llvm/test/Transforms/InstCombine/store.ll
index fab280b366f44c8..53e0865e16c872b 100644
--- a/llvm/test/Transforms/InstCombine/store.ll
+++ b/llvm/test/Transforms/InstCombine/store.ll
@@ -30,7 +30,7 @@ define void @store_into_undef(ptr %P) {
 
 define void @store_into_null(ptr %P) {
 ; CHECK-LABEL: @store_into_null(
-; CHECK-NEXT:    store i32 poison, ptr null, align 4294967296
+; CHECK-NEXT:    store i32 poison, ptr null, align 4
 ; CHECK-NEXT:    ret void
 ;
   store i32 124, ptr null

diff --git a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
index 1d54fe0827ce476..feb98891035badf 100644
--- a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
@@ -124,7 +124,7 @@ define void @test_neg_unmodeled_write() {
 define i32 @test_neg_captured_by_call() {
 ; CHECK-LABEL: @test_neg_captured_by_call(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A2:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[A2:%.*]] = alloca ptr, align 4
 ; CHECK-NEXT:    call void @f2(ptr nonnull writeonly [[A]], ptr nonnull [[A2]]) #[[ATTR3]]
 ; CHECK-NEXT:    [[A_COPY_CAST:%.*]] = load ptr, ptr [[A2]], align 8
 ; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[A_COPY_CAST]], align 4

diff --git a/llvm/test/Transforms/InstCombine/vscale_gep.ll b/llvm/test/Transforms/InstCombine/vscale_gep.ll
index 8bb4e2cb95ac93c..2a1865f69fe30db 100644
--- a/llvm/test/Transforms/InstCombine/vscale_gep.ll
+++ b/llvm/test/Transforms/InstCombine/vscale_gep.ll
@@ -41,7 +41,7 @@ define i32 @gep_alloca_inbounds_vscale_zero() {
 ; CHECK-LABEL: @gep_alloca_inbounds_vscale_zero(
 ; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
 ; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds <vscale x 4 x i32>, ptr [[A]], i64 0, i64 2
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 4
 ; CHECK-NEXT:    ret i32 [[LOAD]]
 ;
   %a = alloca <vscale x 4 x i32>
@@ -55,7 +55,7 @@ define i32 @gep_alloca_inbounds_vscale_nonzero() {
 ; CHECK-LABEL: @gep_alloca_inbounds_vscale_nonzero(
 ; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
 ; CHECK-NEXT:    [[TMP:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[A]], i64 1, i64 2
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 4
 ; CHECK-NEXT:    ret i32 [[LOAD]]
 ;
   %a = alloca <vscale x 4 x i32>

diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll b/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
index 630fef820b809d2..97bb4a2b4db5361 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
@@ -9,8 +9,8 @@
 define zeroext i8 @sum() {
 ; CHECK-LABEL: @sum(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <64 x i8>, ptr getelementptr inbounds ([128 x i8], ptr @bytes, i64 0, i64 64), align 16
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <64 x i8>, ptr @bytes, align 16
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <64 x i8>, ptr getelementptr inbounds ([128 x i8], ptr @bytes, i64 0, i64 64), align 1
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <64 x i8>, ptr @bytes, align 1
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <64 x i8> [[WIDE_LOAD2]], [[WIDE_LOAD]]
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> [[BIN_RDX]])
 ; CHECK-NEXT:    ret i8 [[TMP0]]

diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index fd99e3f2ddde074..6a6440dbed601a1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -29,12 +29,12 @@ define void @example1() optsize {
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 16
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 16
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr [[TMP4]], align 16
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr [[TMP4]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -90,7 +90,7 @@ define void @example2(i32 %n, i32 %x) optsize {
 ; CHECK-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; CHECK:       pred.store.if:
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    store i32 [[X:%.*]], ptr [[TMP5]], align 16
+; CHECK-NEXT:    store i32 [[X:%.*]], ptr [[TMP5]], align 4
 ; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
 ; CHECK:       pred.store.continue:
 ; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
@@ -106,7 +106,7 @@ define void @example2(i32 %n, i32 %x) optsize {
 ; CHECK:       pred.store.if3:
 ; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[INDEX]], 2
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP10]]
-; CHECK-NEXT:    store i32 [[X]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store i32 [[X]], ptr [[TMP11]], align 4
 ; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE4]]
 ; CHECK:       pred.store.continue4:
 ; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3

diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index a89482a943569a3..80348f9e46130f1 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -25,10 +25,10 @@ define i32 @main() #0 {
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [40000 x i8], ptr addrspace(1) @Y, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr addrspace(1) [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr addrspace(1) [[TMP0]], align 1
 ; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1>
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [40000 x i8], ptr @X, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    store <4 x i8> [[TMP1]], ptr [[TMP2]], align 4
+; CHECK-NEXT:    store <4 x i8> [[TMP1]], ptr [[TMP2]], align 1
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 40000
 ; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]

diff --git a/llvm/test/Transforms/LoopVectorize/non-const-n.ll b/llvm/test/Transforms/LoopVectorize/non-const-n.ll
index aa04da5639a7210..295bf9111329c7f 100644
--- a/llvm/test/Transforms/LoopVectorize/non-const-n.ll
+++ b/llvm/test/Transforms/LoopVectorize/non-const-n.ll
@@ -20,12 +20,12 @@ define void @example1(i32 %n) nounwind uwtable ssp {
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 16
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 16
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
 ; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[INDEX]]
-; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr [[TMP5]], align 16
+; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr [[TMP5]], align 4
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX]], [[TMP1]]
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]

diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
index 7fde8ef2914a8e0..b1cee80bde33fde 100644
--- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
@@ -19,7 +19,7 @@ define void @caller1(i1 %c, ptr align 1 %ptr) {
 ; ASSUMPTIONS-OFF-NEXT:    br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]]
 ; ASSUMPTIONS-OFF:       common.ret:
 ; ASSUMPTIONS-OFF-NEXT:    [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ]
-; ASSUMPTIONS-OFF-NEXT:    store volatile i64 0, ptr [[PTR:%.*]], align 8
+; ASSUMPTIONS-OFF-NEXT:    store volatile i64 0, ptr [[PTR:%.*]], align 4
 ; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
 ; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
 ; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4


        


More information about the llvm-commits mailing list