[llvm] c65b4d6 - [SelectionDAG] Do not second-guess alignment for alloca

Andrew Savonichev via llvm-commits <llvm-commits@lists.llvm.org>
Thu Feb 9 07:45:47 PST 2023


Author: Andrew Savonichev
Date: 2023-02-09T18:45:20+03:00
New Revision: c65b4d64d4b09795fe237b62a4226121c5b13248

URL: https://github.com/llvm/llvm-project/commit/c65b4d64d4b09795fe237b62a4226121c5b13248
DIFF: https://github.com/llvm/llvm-project/commit/c65b4d64d4b09795fe237b62a4226121c5b13248.diff

LOG: [SelectionDAG] Do not second-guess alignment for alloca

Alignment of an alloca in IR can be lower than the preferred alignment
on purpose, but the override in SelectionDAG's FunctionLoweringInfo
essentially treats the preferred alignment as the minimum alignment.
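
For illustration, a minimal sketch (hypothetical function, assuming a
target where the preferred alignment of i64 is 8 and the stack
alignment is at least 8): the old logic would promote this alloca to
align 8, whereas with this patch it keeps the written align 2.

; hypothetical example, not part of this patch
define void @under_aligned() {
  %p = alloca i64, align 2   ; deliberately under-aligned
  store i64 0, ptr %p, align 2
  ret void
}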

The patch changes this behavior to always use the specified
alignment. If alignment is not set explicitly in LLVM IR, it is set to
DL.getPrefTypeAlign(Ty) in computeAllocaDefaultAlign.
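
For example (a sketch, assuming a data layout where the preferred
alignment of i32 is 4), an alloca written without an explicit
alignment already reaches isel with the preferred alignment filled in:

; hypothetical example, not part of this patch
define void @default_aligned() {
  %v = alloca i32   ; recorded in IR as: %v = alloca i32, align 4
  ret void
}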

Tests are changed as well: explicit alignment is increased to match
the preferred alignment where the difference changes the output, or
omitted when it is hard to determine the right value (e.g. for
pointers, some structs, or unusual types).

Differential Revision: https://reviews.llvm.org/D135462

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
    llvm/test/CodeGen/AArch64/preferred-alignment.ll
    llvm/test/CodeGen/AArch64/seh-finally.ll
    llvm/test/CodeGen/AMDGPU/call-argument-types.ll
    llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
    llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
    llvm/test/CodeGen/ARM/ssp-data-layout.ll
    llvm/test/CodeGen/BPF/pr57872.ll
    llvm/test/CodeGen/BPF/undef.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
    llvm/test/CodeGen/Mips/atomic64.ll
    llvm/test/CodeGen/Mips/cconv/byval.ll
    llvm/test/CodeGen/Mips/cconv/return-struct.ll
    llvm/test/CodeGen/Mips/largeimmprinting.ll
    llvm/test/CodeGen/Mips/o32_cc_byval.ll
    llvm/test/CodeGen/NVPTX/lower-byval-args.ll
    llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
    llvm/test/CodeGen/PowerPC/aix-sret-param.ll
    llvm/test/CodeGen/PowerPC/byval.ll
    llvm/test/CodeGen/PowerPC/structsinregs.ll
    llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/frame.ll
    llvm/test/CodeGen/RISCV/mem64.ll
    llvm/test/CodeGen/RISCV/vararg.ll
    llvm/test/CodeGen/Thumb2/mve-stack.ll
    llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
    llvm/test/CodeGen/VE/Scalar/atomic_load.ll
    llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
    llvm/test/CodeGen/WebAssembly/PR40172.ll
    llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
    llvm/test/CodeGen/X86/fast-isel-call.ll
    llvm/test/CodeGen/X86/load-local-v3i129.ll
    llvm/test/CodeGen/X86/pr44140.ll
    llvm/test/CodeGen/X86/ssp-data-layout.ll
    llvm/test/CodeGen/X86/win-cleanuppad.ll
    llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
    llvm/test/DebugInfo/AArch64/frameindices.ll
    llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
    llvm/test/DebugInfo/X86/dbg-addr.ll
    llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
    llvm/test/DebugInfo/X86/sret.ll
    llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index c18cd39ed2963..476648668e3ba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -128,20 +128,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
     for (const Instruction &I : BB) {
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
         Type *Ty = AI->getAllocatedType();
-        Align TyPrefAlign = MF->getDataLayout().getPrefTypeAlign(Ty);
-        // The "specified" alignment is the alignment written on the alloca,
-        // or the preferred alignment of the type if none is specified.
-        //
-        // (Unspecified alignment on allocas will be going away soon.)
-        Align SpecifiedAlign = AI->getAlign();
-
-        // If the preferred alignment of the type is higher than the specified
-        // alignment of the alloca, promote the alignment, as long as it doesn't
-        // require realigning the stack.
-        //
-        // FIXME: Do we really want to second-guess the IR in isel?
-        Align Alignment =
-            std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign);
+        Align Alignment = AI->getAlign();
 
         // Static allocas can be folded into the initial stack frame
         // adjustment. For targets that don't realign the stack, don't

diff --git a/llvm/test/CodeGen/AArch64/preferred-alignment.ll b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
index c9c5212d479d9..6149c8db91486 100644
--- a/llvm/test/CodeGen/AArch64/preferred-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
@@ -3,11 +3,11 @@
 ; Function Attrs: nounwind
 define i32 @foo() #0 {
 entry:
-  %c = alloca i8, align 1
+  %c = alloca i8
 ; CHECK:	add	x0, sp, #12
-  %s = alloca i16, align 2
+  %s = alloca i16
 ; CHECK-NEXT:	add	x1, sp, #8
-  %i = alloca i32, align 4
+  %i = alloca i32
 ; CHECK-NEXT:	add	x2, sp, #4
   %call = call i32 @bar(ptr %c, ptr %s, ptr %i)
   %0 = load i8, ptr %c, align 1

diff --git a/llvm/test/CodeGen/AArch64/seh-finally.ll b/llvm/test/CodeGen/AArch64/seh-finally.ll
index 581053ec3f56d..04a30800d9294 100644
--- a/llvm/test/CodeGen/AArch64/seh-finally.ll
+++ b/llvm/test/CodeGen/AArch64/seh-finally.ll
@@ -42,7 +42,7 @@ entry:
 ; CHECK: ldur    w0, [x29, #-8]
 ; CHECK: bl      foo
 
-  %o = alloca %struct.S, align 4
+  %o = alloca %struct.S, align 8
   call void (...) @llvm.localescape(ptr %o)
   %0 = load i32, ptr %o, align 4
   invoke void @foo(i32 %0) #5

diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index 6f16dcb3c47da..1d00ab14c0d85 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -671,7 +671,7 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
 ; GCN-NEXT: s_swappc_b64
 ; GCN-NOT: [[SP]]
 define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0 {
-  %val = alloca { i8, i32 }, align 4, addrspace(5)
+  %val = alloca { i8, i32 }, align 8, addrspace(5)
   %gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %val, i32 0, i32 0
   %gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %val, i32 0, i32 1
   store i8 3, ptr addrspace(5) %gep0
@@ -702,8 +702,8 @@ define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0
 ; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off
 ; GCN: buffer_store_dword [[LOAD_OUT_VAL1]], off
 define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval_struct_i8_i32(i32) #0 {
-  %in.val = alloca { i8, i32 }, align 4, addrspace(5)
-  %out.val = alloca { i8, i32 }, align 4, addrspace(5)
+  %in.val = alloca { i8, i32 }, align 8, addrspace(5)
+  %out.val = alloca { i8, i32 }, align 8, addrspace(5)
   %in.gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %in.val, i32 0, i32 0
   %in.gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %in.val, i32 0, i32 1
   store i8 3, ptr addrspace(5) %in.gep0

diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index 00540a299d058..0f230e5703eef 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -289,7 +289,7 @@ bb5:
 
 ; GCN: ds_write_b32 v{{[0-9]+}}, [[PTR]]
 define void @alloca_ptr_nonentry_block(i32 %arg0) #0 {
-  %alloca0 = alloca { i8, i32 }, align 4, addrspace(5)
+  %alloca0 = alloca { i8, i32 }, align 8, addrspace(5)
   %cmp = icmp eq i32 %arg0, 0
   br i1 %cmp, label %bb, label %ret
 

diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
index db8b2c4371c38..356348aa64c89 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -11098,7 +11098,7 @@ entry:
   %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
 
 ; allocate enough scratch to go beyond 2^12 addressing
-  %scratch = alloca <1280 x i32>, align 8, addrspace(5)
+  %scratch = alloca <1280 x i32>, align 16, addrspace(5)
 
 ; load VGPR data
   %aptr = getelementptr <64 x i32>, ptr addrspace(1) %in, i32 %tid

diff --git a/llvm/test/CodeGen/ARM/ssp-data-layout.ll b/llvm/test/CodeGen/ARM/ssp-data-layout.ll
index 9dd62a936c4c8..c5f13a66c11ca 100644
--- a/llvm/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/ARM/ssp-data-layout.ll
@@ -386,8 +386,8 @@ entry:
 ; CHECK: bl get_struct_large_char2
 ; CHECK: strb r0, [sp, #106]
 ; CHECK: bl end_struct_large_char2
-  %a = alloca %struct.struct_small_char, align 1
-  %b = alloca %struct.struct_large_char2, align 1
+  %a = alloca %struct.struct_small_char, align 4
+  %b = alloca %struct.struct_large_char2, align 4
   %d1 = alloca %struct.struct_large_nonchar, align 8
   %d2 = alloca %struct.struct_small_nonchar, align 2
   %call = call signext i8 @get_struct_small_char()

diff --git a/llvm/test/CodeGen/BPF/pr57872.ll b/llvm/test/CodeGen/BPF/pr57872.ll
index a9162496c9c15..34f9975d6f28b 100644
--- a/llvm/test/CodeGen/BPF/pr57872.ll
+++ b/llvm/test/CodeGen/BPF/pr57872.ll
@@ -180,7 +180,7 @@ define void @foo(ptr %g) {
 ; CHECK-NEXT:    call bar
 ; CHECK-NEXT:    exit
 entry:
-  %event = alloca %struct.event, align 1
+  %event = alloca %struct.event, align 8
   %hostname = getelementptr inbounds %struct.event, ptr %event, i64 0, i32 1
   %0 = load ptr, ptr %g, align 8
   call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(84) %hostname, ptr noundef nonnull align 1 dereferenceable(84) %0, i64 84, i1 false)

diff --git a/llvm/test/CodeGen/BPF/undef.ll b/llvm/test/CodeGen/BPF/undef.ll
index 54c115193192f..0322f5972b7ee 100644
--- a/llvm/test/CodeGen/BPF/undef.ll
+++ b/llvm/test/CodeGen/BPF/undef.ll
@@ -40,7 +40,7 @@ define i32 @ebpf_filter(ptr nocapture readnone %ebpf_packet) #0 section "socket1
 ; CHECK: r1 = routing
 ; CHECK: call bpf_map_lookup_elem
 ; CHECK: exit
-  %key = alloca %struct.routing_key_2, align 1
+  %key = alloca %struct.routing_key_2, align 8
   store i8 5, ptr %key, align 1
   %1 = getelementptr inbounds %struct.routing_key_2, ptr %key, i64 0, i32 0, i64 1
   store i8 6, ptr %1, align 1

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
index d7913557c7d42..73e5f714802f6 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
@@ -10,8 +10,8 @@ entry:
 ; CHECK-LABEL: foobar:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
-  %a = alloca %struct.x, align 4
-  %c = alloca ptr, align 4
+  %a = alloca %struct.x, align 8
+  %c = alloca ptr, align 8
   store i32 %x, ptr %x.addr, align 4
   %0 = load i32, ptr %x.addr, align 4
   store i32 %0, ptr %a, align 4

diff --git a/llvm/test/CodeGen/Mips/atomic64.ll b/llvm/test/CodeGen/Mips/atomic64.ll
index a454c442b62bd..8d9c1ca866a75 100644
--- a/llvm/test/CodeGen/Mips/atomic64.ll
+++ b/llvm/test/CodeGen/Mips/atomic64.ll
@@ -1145,7 +1145,7 @@ define i64 @AtomicSwap64(i64 signext %newval) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    daddiu $sp, $sp, 16
 entry:
-  %newval.addr = alloca i64, align 4
+  %newval.addr = alloca i64, align 8
   store i64 %newval, ptr %newval.addr, align 4
   %tmp = load i64, ptr %newval.addr, align 4
   %0 = atomicrmw xchg ptr @x, i64 %tmp monotonic
@@ -1359,7 +1359,7 @@ define i64 @AtomicCmpSwap64(i64 signext %oldval, i64 signext %newval) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    daddiu $sp, $sp, 16
 entry:
-  %newval.addr = alloca i64, align 4
+  %newval.addr = alloca i64, align 8
   store i64 %newval, ptr %newval.addr, align 4
   %tmp = load i64, ptr %newval.addr, align 4
   %0 = cmpxchg ptr @x, i64 %oldval, i64 %tmp monotonic monotonic

diff --git a/llvm/test/CodeGen/Mips/cconv/byval.ll b/llvm/test/CodeGen/Mips/cconv/byval.ll
index 18e1914eda404..482c34727379f 100644
--- a/llvm/test/CodeGen/Mips/cconv/byval.ll
+++ b/llvm/test/CodeGen/Mips/cconv/byval.ll
@@ -151,7 +151,7 @@ define dso_local void @g() #0 {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    daddu $sp, $sp, $1
 entry:
-  %a = alloca %struct.S1, align 4
+  %a = alloca %struct.S1, align 8
   call void @f2(ptr byval(%struct.S1) align 4 %a)
   ret void
 }
@@ -340,8 +340,8 @@ define dso_local void @g2(ptr %a) {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    daddu $sp, $sp, $1
 entry:
-  %a.addr = alloca ptr, align 4
-  %byval-temp = alloca %struct.S1, align 4
+  %a.addr = alloca ptr
+  %byval-temp = alloca %struct.S1, align 8
   store ptr %a, ptr %a.addr, align 4
   %0 = load ptr, ptr %a.addr, align 4
   call void @llvm.memcpy.p0.p0.i32(ptr align 4 %byval-temp, ptr align 1 %0, i32 65520, i1 false)
@@ -410,8 +410,8 @@ define dso_local i32 @g3(ptr %a, ptr %b) #0 {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %a.addr = alloca ptr, align 4
-  %b.addr = alloca ptr, align 4
+  %a.addr = alloca ptr
+  %b.addr = alloca ptr
   store ptr %a, ptr %a.addr, align 4
   store ptr %b, ptr %b.addr, align 4
   %0 = load ptr, ptr %a.addr, align 4

diff --git a/llvm/test/CodeGen/Mips/cconv/return-struct.ll b/llvm/test/CodeGen/Mips/cconv/return-struct.ll
index 67c58274c3c2e..68f8127ae9f13 100644
--- a/llvm/test/CodeGen/Mips/cconv/return-struct.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-struct.ll
@@ -139,7 +139,7 @@ define inreg {i16} @ret_struct_i16() nounwind {
 ; N64-LE-NEXT:    jr $ra
 ; N64-LE-NEXT:    daddiu $sp, $sp, 16
 entry:
-        %retval = alloca {i8,i8}, align 1
+        %retval = alloca {i8,i8}, align 8
         call void @llvm.memcpy.p0.p0.i64(ptr %retval, ptr @struct_2byte, i64 2, i1 false)
         %0 = load volatile {i16}, ptr %retval
         ret {i16} %0

diff --git a/llvm/test/CodeGen/Mips/largeimmprinting.ll b/llvm/test/CodeGen/Mips/largeimmprinting.ll
index 144e2bc2511cb..eed9e12eac8b0 100644
--- a/llvm/test/CodeGen/Mips/largeimmprinting.ll
+++ b/llvm/test/CodeGen/Mips/largeimmprinting.ll
@@ -24,7 +24,7 @@ entry:
 ; 64:  daddu   $[[R1]], $sp, $[[R1]]
 ; 64:  sd      $ra, 24($[[R1]])
 
-  %agg.tmp = alloca %struct.S1, align 1
+  %agg.tmp = alloca %struct.S1, align 8
   call void @llvm.memcpy.p0.p0.i32(ptr align 1 %agg.tmp, ptr align 1 @s1, i32 65536, i1 false)
   call void @f2(ptr byval(%struct.S1) %agg.tmp) nounwind
   ret void

diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
index 727e6574b41c8..de6b4dd2ab71d 100644
--- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
@@ -80,7 +80,7 @@ define void @f1() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    addiu $sp, $sp, 64
 entry:
-  %agg.tmp10 = alloca %struct.S3, align 4
+  %agg.tmp10 = alloca %struct.S3, align 8
   call void @callee1(float 2.000000e+01, ptr byval(%struct.S1) @f1.s1) nounwind
   call void @callee2(ptr byval(%struct.S2) @f1.s2) nounwind
   store i8 11, ptr %agg.tmp10, align 4

diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
index ba24126d1544b..cc46657c30131 100644
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -118,7 +118,7 @@ bb:
 
 ; Verify that if the pointer escapes, then we do fall back onto using a temp copy.
 ; CHECK-LABEL: .visible .entry pointer_escapes
-; CHECK: .local .align 8 .b8     __local_depot{{.*}}
+; CHECK: .local .align 4 .b8     __local_depot{{.*}}
 ; CHECK64: ld.param.u64    [[result_addr:%rd[0-9]+]], [{{.*}}_param_0]
 ; CHECK64: add.u64         %[[copy_addr:rd[0-9]+]], %SPL, 0;
 ; CHECK32: ld.param.u32    [[result_addr:%r[0-9]+]], [{{.*}}_param_0]

diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
index 71054904f0ead..5e7a1bc81916e 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
@@ -353,7 +353,7 @@ entry:
 define void @call_test_byval_4Byte() {
 entry:
   %s0 = alloca %struct.S0, align 8
-  %s4a = alloca %struct.S4A, align 4
+  %s4a = alloca %struct.S4A, align 8
   %call = call signext i32 @test_byval_4Byte(ptr byval(%struct.S4) align 1 @gS4, ptr byval(%struct.S0) align 1 %s0, ptr byval(%struct.S4A) align 4 %s4a)
   ret void
 }
@@ -945,7 +945,7 @@ entry:
 
 define i32 @call_test_byval_homogeneous_float_struct() {
 entry:
-  %s = alloca %struct.F, align 4
+  %s = alloca %struct.F, align 8
   call void @llvm.memset.p0.i32(ptr align 4 %s, i8 0, i32 12, i1 false)
   %call = call i32 @test_byval_homogeneous_float_struct(ptr byval(%struct.F) align 4 %s)
   ret i32 %call

diff --git a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
index 3c40fc3c3e881..10dbd0618e545 100644
--- a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
@@ -17,7 +17,7 @@
 
 define void @test1() {
 entry:
-  %s = alloca %struct.S, align 4
+  %s = alloca %struct.S, align 8
   call void @foo(ptr sret(%struct.S) %s)
   ret void
 }

diff --git a/llvm/test/CodeGen/PowerPC/byval.ll b/llvm/test/CodeGen/PowerPC/byval.ll
index 8688f5a8993ef..24fefbd0d2281 100644
--- a/llvm/test/CodeGen/PowerPC/byval.ll
+++ b/llvm/test/CodeGen/PowerPC/byval.ll
@@ -34,7 +34,7 @@ define dso_local i32 @bar() {
 ; CHECK-NEXT:    mtlr 0
 ; CHECK-NEXT:    blr
 entry:
-  %x = alloca %struct, align 4
+  %x = alloca %struct, align 8
   call void @foo(ptr %x)
   %r = call i32 @foo1(ptr byval(%struct) %x)
   ret i32 %r

diff --git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll
index eb804c93bf1ef..884aa39c7caa3 100644
--- a/llvm/test/CodeGen/PowerPC/structsinregs.ll
+++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll
@@ -35,13 +35,13 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define i32 @caller1() nounwind {
 entry:
-  %p1 = alloca %struct.s1, align 1
-  %p2 = alloca %struct.s2, align 2
-  %p3 = alloca %struct.s3, align 2
-  %p4 = alloca %struct.s4, align 4
-  %p5 = alloca %struct.s5, align 4
-  %p6 = alloca %struct.s6, align 4
-  %p7 = alloca %struct.s7, align 4
+  %p1 = alloca %struct.s1
+  %p2 = alloca %struct.s2
+  %p3 = alloca %struct.s3
+  %p4 = alloca %struct.s4
+  %p5 = alloca %struct.s5
+  %p6 = alloca %struct.s6
+  %p7 = alloca %struct.s7
   call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
@@ -103,13 +103,13 @@ entry:
 
 define i32 @caller2() nounwind {
 entry:
-  %p1 = alloca %struct.t1, align 1
-  %p2 = alloca %struct.t2, align 1
-  %p3 = alloca %struct.t3, align 1
-  %p4 = alloca %struct.t4, align 1
-  %p5 = alloca %struct.t5, align 1
-  %p6 = alloca %struct.t6, align 1
-  %p7 = alloca %struct.t7, align 1
+  %p1 = alloca %struct.t1
+  %p2 = alloca %struct.t2
+  %p3 = alloca %struct.t3
+  %p4 = alloca %struct.t4
+  %p5 = alloca %struct.t5
+  %p6 = alloca %struct.t6
+  %p7 = alloca %struct.t7
   call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)

diff --git a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
index 37ef8ec8d7ad2..cdd2e904da44b 100644
--- a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
+++ b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
@@ -7,7 +7,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 
 define void @foo(float inreg %s.coerce) nounwind {
 entry:
-  %s = alloca %struct.Sf1, align 4
+  %s = alloca %struct.Sf1, align 8
   store float %s.coerce, ptr %s, align 1
   %0 = load float, ptr %s, align 1
   call void (i32, ...) @testvaSf1(i32 1, float inreg %0)

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index fbec0089f0d38..0e4702d13a8cd 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -594,7 +594,7 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-WITHFP-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 48
 ; RV32I-WITHFP-NEXT:    ret
-  %ls = alloca %struct.large, align 4
+  %ls = alloca %struct.large, align 8
   store i32 1, ptr %ls
   %b = getelementptr inbounds %struct.large, ptr %ls, i32 0, i32 1
   store i32 2, ptr %b

diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
index ef5df8ea96f92..183a0f47c68c8 100644
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -41,7 +41,7 @@ define i32 @test() nounwind {
 ; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32
 ; RV32I-WITHFP-NEXT:    ret
-  %key = alloca %struct.key_t, align 4
+  %key = alloca %struct.key_t, align 8
   call void @llvm.memset.p0.i64(ptr align 4 %key, i8 0, i64 20, i1 false)
   %1 = getelementptr inbounds %struct.key_t, ptr %key, i64 0, i32 1, i64 0
   call void @test1(ptr %1)

diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index a7ea053c7dcd0..ab775481accc3 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -368,7 +368,7 @@ define void @addi_fold_crash(i64 %arg) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 bb:
-  %tmp = alloca %struct.quux, align 4
+  %tmp = alloca %struct.quux, align 8
   %tmp1 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1
   %tmp2 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1, i64 %arg
   store i8 0, ptr %tmp2, align 1

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index df8a6706997b7..dd3c3d865c961 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -138,7 +138,7 @@ define i32 @va1(ptr %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load ptr, ptr %va, align 4
   %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
@@ -238,7 +238,7 @@ define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i32
   call void @llvm.va_end(ptr %va)
@@ -401,7 +401,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i32
   %2 = alloca i8, i32 %1
@@ -599,7 +599,7 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load i32, ptr %va, align 4
   %1 = add i32 %argp.cur, 7
@@ -719,7 +719,7 @@ define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, double
   call void @llvm.va_end(ptr %va)
@@ -916,7 +916,7 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load i32, ptr %va, align 4
   %1 = add i32 %argp.cur, 7
@@ -1041,7 +1041,7 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, double
   call void @llvm.va_end(ptr %va)
@@ -1341,8 +1341,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 112
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %vargs = alloca ptr, align 4
-  %wargs = alloca ptr, align 4
+  %vargs = alloca ptr
+  %wargs = alloca ptr
   call void @llvm.va_start(ptr %vargs)
   %1 = va_arg ptr %vargs, i32
   call void @llvm.va_copy(ptr %wargs, ptr %vargs)
@@ -1660,7 +1660,7 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i32
   call void @llvm.va_end(ptr %va)
@@ -1849,7 +1849,7 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 2032
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %large = alloca [ 100000000 x i8 ]
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load ptr, ptr %va, align 4
   %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4

diff --git a/llvm/test/CodeGen/Thumb2/mve-stack.ll b/llvm/test/CodeGen/Thumb2/mve-stack.ll
index 68beebc5968bc..2cd8e011368f1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-stack.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-stack.ll
@@ -15,7 +15,7 @@ define arm_aapcs_vfpcc void @vstrw32() {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [4 x i32], align 2
+  %d = alloca [4 x i32], align 4
   %g = getelementptr inbounds [4 x i32], ptr %d, i32 0, i32 2
   store <4 x i32> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -57,7 +57,7 @@ define arm_aapcs_vfpcc void @vstrb8() {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [16 x i8], align 2
+  %d = alloca [16 x i8], align 4
   %g = getelementptr inbounds [16 x i8], ptr %d, i32 0, i32 2
   store <16 x i8> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -78,7 +78,7 @@ define arm_aapcs_vfpcc void @vstrh32() {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [4 x i16], align 2
+  %d = alloca [4 x i16], align 4
   %g = getelementptr inbounds [4 x i16], ptr %d, i32 0, i32 2
   store <4 x i16> <i16 6, i16 6, i16 6, i16 6>, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -99,7 +99,7 @@ define arm_aapcs_vfpcc void @vstrb32() {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [4 x i8], align 2
+  %d = alloca [4 x i8], align 4
   %g = getelementptr inbounds [4 x i8], ptr %d, i32 0, i32 2
   store <4 x i8> <i8 6, i8 6, i8 6, i8 6>, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -120,7 +120,7 @@ define arm_aapcs_vfpcc void @vstrb16() {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [8 x i8], align 2
+  %d = alloca [8 x i8], align 4
   %g = getelementptr inbounds [8 x i8], ptr %d, i32 0, i32 2
   store <8 x i8> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -141,7 +141,7 @@ define arm_aapcs_vfpcc <4 x i32> @vldrw32() {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [4 x i32], align 2
+  %d = alloca [4 x i32], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [4 x i32], ptr %d, i32 0, i32 2
   %l = load <4 x i32>, ptr %g, align 2
@@ -181,7 +181,7 @@ define arm_aapcs_vfpcc <16 x i8> @vldrb8() {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %d = alloca [16 x i8], align 2
+  %d = alloca [16 x i8], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [16 x i8], ptr %d, i32 0, i32 2
   %l = load <16 x i8>, ptr %g, align 2
@@ -202,7 +202,7 @@ define arm_aapcs_vfpcc <4 x i16> @vldrh32() {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %d = alloca [4 x i16], align 2
+  %d = alloca [4 x i16], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [4 x i16], ptr %d, i32 0, i32 2
   %l = load <4 x i16>, ptr %g, align 2
@@ -223,7 +223,7 @@ define arm_aapcs_vfpcc <4 x i8> @vldrb32() {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %d = alloca [4 x i8], align 2
+  %d = alloca [4 x i8], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [4 x i8], ptr %d, i32 0, i32 2
   %l = load <4 x i8>, ptr %g, align 2
@@ -244,7 +244,7 @@ define arm_aapcs_vfpcc <8 x i8> @vldrb16() {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %d = alloca [8 x i8], align 2
+  %d = alloca [8 x i8], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [8 x i8], ptr %d, i32 0, i32 2
   %l = load <8 x i8>, ptr %g, align 2

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
index ef64a2bbb5857..b70f0ea602d0b 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
@@ -1462,7 +1462,7 @@ define zeroext i1 @_Z30atomic_cmp_swap_relaxed_stk_i1Rbb(ptr nocapture nonnull a
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic", align 1
+  %i = alloca %"struct.std::__1::atomic", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %i)
   %i3 = zext i1 %arg1 to i8
   %i4 = load i8, ptr %arg, align 1
@@ -1525,7 +1525,7 @@ define signext i8 @_Z30atomic_cmp_swap_relaxed_stk_i8Rcc(ptr nocapture nonnull a
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.0", align 1
+  %i = alloca %"struct.std::__1::atomic.0", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %i)
   %i3 = load i8, ptr %arg, align 1
   %i4 = cmpxchg weak volatile ptr %i, i8 %i3, i8 %arg1 monotonic monotonic, align 1
@@ -1581,7 +1581,7 @@ define zeroext i8 @_Z30atomic_cmp_swap_relaxed_stk_u8Rhh(ptr nocapture nonnull a
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.5", align 1
+  %i = alloca %"struct.std::__1::atomic.5", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %i)
   %i3 = load i8, ptr %arg, align 1
   %i4 = cmpxchg weak volatile ptr %i, i8 %i3, i8 %arg1 monotonic monotonic, align 1
@@ -1638,7 +1638,7 @@ define signext i16 @_Z31atomic_cmp_swap_relaxed_stk_i16Rss(ptr nocapture nonnull
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.10", align 2
+  %i = alloca %"struct.std::__1::atomic.10", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %i)
   %i4 = load i16, ptr %arg, align 2
   %i5 = cmpxchg weak volatile ptr %i, i16 %i4, i16 %arg1 monotonic monotonic, align 2
@@ -1694,7 +1694,7 @@ define zeroext i16 @_Z31atomic_cmp_swap_relaxed_stk_u16Rtt(ptr nocapture nonnull
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.15", align 2
+  %i = alloca %"struct.std::__1::atomic.15", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %i)
   %i4 = load i16, ptr %arg, align 2
   %i5 = cmpxchg weak volatile ptr %i, i16 %i4, i16 %arg1 monotonic monotonic, align 2
@@ -1741,7 +1741,7 @@ define signext i32 @_Z31atomic_cmp_swap_relaxed_stk_i32Rii(ptr nocapture nonnull
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.20", align 4
+  %i = alloca %"struct.std::__1::atomic.20", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i)
   %i4 = load i32, ptr %arg, align 4
   %i5 = cmpxchg weak volatile ptr %i, i32 %i4, i32 %arg1 monotonic monotonic, align 4
@@ -1788,7 +1788,7 @@ define zeroext i32 @_Z31atomic_cmp_swap_relaxed_stk_u32Rjj(ptr nocapture nonnull
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.25", align 4
+  %i = alloca %"struct.std::__1::atomic.25", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i)
   %i4 = load i32, ptr %arg, align 4
   %i5 = cmpxchg weak volatile ptr %i, i32 %i4, i32 %arg1 monotonic monotonic, align 4

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
index 82be06b8f1f44..aff4061f46075 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
@@ -521,7 +521,7 @@ define zeroext i1 @_Z26atomic_load_relaxed_stk_i1v() {
 ; CHECK-NEXT:    ld1b.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic", align 1
+  %1 = alloca %"struct.std::__1::atomic", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
   call void @_Z6fun_i1RNSt3__16atomicIbEE(ptr nonnull align 1 dereferenceable(1) %1)
   %2 = load atomic i8, ptr %1 monotonic, align 1
@@ -550,7 +550,7 @@ define signext i8 @_Z26atomic_load_relaxed_stk_i8v() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    ld1b.sx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic.0", align 1
+  %1 = alloca %"struct.std::__1::atomic.0", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
   call void @_Z6fun_i8RNSt3__16atomicIcEE(ptr nonnull align 1 dereferenceable(1) %1)
   %2 = load atomic i8, ptr %1 monotonic, align 1
@@ -571,7 +571,7 @@ define zeroext i8 @_Z26atomic_load_relaxed_stk_u8v() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    ld1b.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic.5", align 1
+  %1 = alloca %"struct.std::__1::atomic.5", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
   call void @_Z6fun_u8RNSt3__16atomicIhEE(ptr nonnull align 1 dereferenceable(1) %1)
   %2 = load atomic i8, ptr %1 monotonic, align 1
@@ -592,7 +592,7 @@ define signext i16 @_Z27atomic_load_relaxed_stk_i16v() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    ld2b.sx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic.10", align 2
+  %1 = alloca %"struct.std::__1::atomic.10", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %1)
   call void @_Z7fun_i16RNSt3__16atomicIsEE(ptr nonnull align 2 dereferenceable(2) %1)
   %2 = load atomic i16, ptr %1 monotonic, align 2
@@ -613,7 +613,7 @@ define zeroext i16 @_Z27atomic_load_relaxed_stk_u16v() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    ld2b.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic.15", align 2
+  %1 = alloca %"struct.std::__1::atomic.15", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %1)
   call void @_Z7fun_u16RNSt3__16atomicItEE(ptr nonnull align 2 dereferenceable(2) %1)
   %2 = load atomic i16, ptr %1 monotonic, align 2
@@ -634,7 +634,7 @@ define signext i32 @_Z27atomic_load_relaxed_stk_i32v() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    ldl.sx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic.20", align 4
+  %1 = alloca %"struct.std::__1::atomic.20", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
   call void @_Z7fun_i32RNSt3__16atomicIiEE(ptr nonnull align 4 dereferenceable(4) %1)
   %2 = load atomic i32, ptr %1 monotonic, align 4
@@ -655,7 +655,7 @@ define zeroext i32 @_Z27atomic_load_relaxed_stk_u32v() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    ldl.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %1 = alloca %"struct.std::__1::atomic.25", align 4
+  %1 = alloca %"struct.std::__1::atomic.25", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
   call void @_Z7fun_u32RNSt3__16atomicIjEE(ptr nonnull align 4 dereferenceable(4) %1)
   %2 = load atomic i32, ptr %1 monotonic, align 4

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
index 87017db2af112..f241906888356 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
@@ -723,7 +723,7 @@ define zeroext i1 @_Z26atomic_swap_relaxed_stk_i1b(i1 zeroext %0) {
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic", align 1
+  %2 = alloca %"struct.std::__1::atomic", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = zext i1 %0 to i8
   %4 = atomicrmw volatile xchg ptr %2, i8 %3 monotonic
@@ -751,7 +751,7 @@ define signext i8 @_Z26atomic_swap_relaxed_stk_i8c(i8 signext %0) {
 ; CHECK-NEXT:    sra.l %s0, %s0, 56
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.0", align 1
+  %2 = alloca %"struct.std::__1::atomic.0", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i8 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
@@ -769,7 +769,7 @@ define zeroext i8 @_Z26atomic_swap_relaxed_stk_u8h(i8 zeroext %0) {
 ; CHECK-NEXT:    and %s0, %s0, (56)0
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.5", align 1
+  %2 = alloca %"struct.std::__1::atomic.5", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i8 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
@@ -788,7 +788,7 @@ define signext i16 @_Z27atomic_swap_relaxed_stk_i16s(i16 signext %0) {
 ; CHECK-NEXT:    sra.l %s0, %s0, 48
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.10", align 2
+  %2 = alloca %"struct.std::__1::atomic.10", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i16 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
@@ -806,7 +806,7 @@ define zeroext i16 @_Z27atomic_swap_relaxed_stk_u16t(i16 zeroext %0) {
 ; CHECK-NEXT:    and %s0, %s0, (48)0
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.15", align 2
+  %2 = alloca %"struct.std::__1::atomic.15", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i16 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
@@ -821,7 +821,7 @@ define signext i32 @_Z27atomic_swap_relaxed_stk_i32i(i32 signext %0) {
 ; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.20", align 4
+  %2 = alloca %"struct.std::__1::atomic.20", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i32 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
@@ -836,7 +836,7 @@ define zeroext i32 @_Z27atomic_swap_relaxed_stk_u32j(i32 zeroext %0) {
 ; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.25", align 4
+  %2 = alloca %"struct.std::__1::atomic.25", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i32 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)

diff --git a/llvm/test/CodeGen/WebAssembly/PR40172.ll b/llvm/test/CodeGen/WebAssembly/PR40172.ll
index f409b99c0c3bb..ed1630c4926d4 100644
--- a/llvm/test/CodeGen/WebAssembly/PR40172.ll
+++ b/llvm/test/CodeGen/WebAssembly/PR40172.ll
@@ -15,7 +15,7 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK:  i32.store8 8($[[BASE]]), $[[A1]]{{$}}
 
 define void @test(i8 %byte) {
-  %t = alloca { i8, i8 }, align 1
+  %t = alloca { i8, i8 }, align 8
   %x4 = and i8 %byte, 1
   %x5 = icmp eq i8 %x4, 1
   %x6 = and i8 %byte, 2

diff --git a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
index c2f5ff9530ba9..deba5a8fe6727 100644
--- a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -50,8 +50,8 @@
 ; Function Attrs: uwtable
 define void @_Z3barii(i32 %param1, i32 %param2) #0 !dbg !24 {
 entry:
-  %var1 = alloca %struct.AAA3, align 1
-  %var2 = alloca %struct.AAA3, align 1
+  %var1 = alloca %struct.AAA3, align 8
+  %var2 = alloca %struct.AAA3, align 8
   tail call void @llvm.dbg.value(metadata i32 %param1, i64 0, metadata !29, metadata !46), !dbg !47
   tail call void @llvm.dbg.value(metadata i32 %param2, i64 0, metadata !30, metadata !46), !dbg !48
   tail call void @llvm.dbg.value(metadata ptr null, i64 0, metadata !31, metadata !46), !dbg !49

diff --git a/llvm/test/CodeGen/X86/fast-isel-call.ll b/llvm/test/CodeGen/X86/fast-isel-call.ll
index 9fec09818c743..a9d2bff923a44 100644
--- a/llvm/test/CodeGen/X86/fast-isel-call.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-call.ll
@@ -59,7 +59,7 @@ define void @test4(ptr %a, ptr %b) {
 %struct.S = type { i8 }
 define void @test5() {
 entry:
-  %s = alloca %struct.S, align 1
+  %s = alloca %struct.S, align 8
 ; CHECK-LABEL: test5:
 ; CHECK: subl $12, %esp
 ; CHECK: leal 8(%esp), %ecx

diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
index 090f8d972094c..8fa7ce0664537 100644
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -29,7 +29,7 @@ define void @_start() nounwind {
 ; SLOW-SHLD-NEXT:    movq $-1, -48(%rsp)
 ; SLOW-SHLD-NEXT:    retq
 Entry:
-  %y = alloca <3 x i129>, align 4
+  %y = alloca <3 x i129>, align 16
   %L = load <3 x i129>, ptr %y
   %I1 = insertelement <3 x i129> %L, i129 340282366920938463463374607431768211455, i32 1
   store <3 x i129> %I1, ptr %y

diff --git a/llvm/test/CodeGen/X86/pr44140.ll b/llvm/test/CodeGen/X86/pr44140.ll
index 68ac3663a4cd8..a218c9d4dcea8 100644
--- a/llvm/test/CodeGen/X86/pr44140.ll
+++ b/llvm/test/CodeGen/X86/pr44140.ll
@@ -59,7 +59,7 @@ start:
   %dummy1 = alloca [22 x i64], align 8
   %dummy2 = alloca [22 x i64], align 8
 
-  %data = alloca <2 x i64>, align 8
+  %data = alloca <2 x i64>, align 16
 
   br label %fake-loop
 

diff --git a/llvm/test/CodeGen/X86/ssp-data-layout.ll b/llvm/test/CodeGen/X86/ssp-data-layout.ll
index 0a08582822e27..bda2598384db8 100644
--- a/llvm/test/CodeGen/X86/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/X86/ssp-data-layout.ll
@@ -93,14 +93,14 @@ entry:
   %y = alloca i32, align 4
   %z = alloca i32, align 4
   %ptr = alloca i32, align 4
-  %small2 = alloca [2 x i16], align 2
+  %small2 = alloca [2 x i16], align 4
   %large2 = alloca [8 x i32], align 16
-  %small = alloca [2 x i8], align 1
-  %large = alloca [8 x i8], align 1
-  %a = alloca %struct.struct_large_char, align 1
-  %b = alloca %struct.struct_small_char, align 1
+  %small = alloca [2 x i8], align 2
+  %large = alloca [8 x i8], align 8
+  %a = alloca %struct.struct_large_char, align 8
+  %b = alloca %struct.struct_small_char, align 8
   %c = alloca %struct.struct_large_nonchar, align 8
-  %d = alloca %struct.struct_small_nonchar, align 2
+  %d = alloca %struct.struct_small_nonchar, align 8
   %call = call i32 @get_scalar1()
   store i32 %call, ptr %x, align 4
   call void @end_scalar1()
@@ -217,12 +217,12 @@ entry:
   %ptr = alloca i32, align 4
   %small2 = alloca [2 x i16], align 2
   %large2 = alloca [8 x i32], align 16
-  %small = alloca [2 x i8], align 1
-  %large = alloca [8 x i8], align 1
-  %a = alloca %struct.struct_large_char, align 1
-  %b = alloca %struct.struct_small_char, align 1
+  %small = alloca [2 x i8], align 2
+  %large = alloca [8 x i8], align 8
+  %a = alloca %struct.struct_large_char, align 8
+  %b = alloca %struct.struct_small_char, align 8
   %c = alloca %struct.struct_large_nonchar, align 8
-  %d = alloca %struct.struct_small_nonchar, align 2
+  %d = alloca %struct.struct_small_nonchar, align 8
   %call = call i32 @get_scalar1()
   store i32 %call, ptr %x, align 4
   call void @end_scalar1()
@@ -325,14 +325,14 @@ entry:
   %y = alloca i32, align 4
   %z = alloca i32, align 4
   %ptr = alloca i32, align 4
-  %small2 = alloca [2 x i16], align 2
+  %small2 = alloca [2 x i16], align 4
   %large2 = alloca [8 x i32], align 16
-  %small = alloca [2 x i8], align 1
-  %large = alloca [8 x i8], align 1
-  %a = alloca %struct.struct_large_char, align 1
-  %b = alloca %struct.struct_small_char, align 1
+  %small = alloca [2 x i8], align 2
+  %large = alloca [8 x i8], align 8
+  %a = alloca %struct.struct_large_char, align 8
+  %b = alloca %struct.struct_small_char, align 8
   %c = alloca %struct.struct_large_nonchar, align 8
-  %d = alloca %struct.struct_small_nonchar, align 2
+  %d = alloca %struct.struct_small_nonchar, align 8
   %call = call i32 @get_scalar1()
   store i32 %call, ptr %x, align 4
   call void @end_scalar1()

diff --git a/llvm/test/CodeGen/X86/win-cleanuppad.ll b/llvm/test/CodeGen/X86/win-cleanuppad.ll
index 4d420f5fc3cd1..452f0a8e36d8d 100644
--- a/llvm/test/CodeGen/X86/win-cleanuppad.ll
+++ b/llvm/test/CodeGen/X86/win-cleanuppad.ll
@@ -58,8 +58,8 @@ declare x86_thiscallcc void @"\01??1Dtor@@QAE@XZ"(ptr) #1
 
 define void @nested_cleanup() #0 personality ptr @__CxxFrameHandler3 {
 entry:
-  %o1 = alloca %struct.Dtor, align 1
-  %o2 = alloca %struct.Dtor, align 1
+  %o1 = alloca %struct.Dtor, align 8
+  %o2 = alloca %struct.Dtor, align 8
   invoke void @f(i32 1)
           to label %invoke.cont unwind label %cleanup.outer
 

diff --git a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
index 1451a64d1ef7d..017dc3c0582ec 100644
--- a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
+++ b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
@@ -13,8 +13,8 @@ define void @test1(i1 %cmp) align 2 {
 ; CHECK-NEXT:    subq $40, %rsp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    testb $1, %dil
-; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT:    movq %rsp, %rcx
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT:    cmovneq %rax, %rcx
 ; CHECK-NEXT:    movups (%rcx), %xmm0
 ; CHECK-NEXT:    callq _sink
@@ -36,8 +36,8 @@ define void @test2(i1 %cmp) align 2 {
 ; CHECK-NEXT:    subq $40, %rsp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    testb $1, %dil
-; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT:    movq %rsp, %rcx
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT:    cmovneq %rax, %rcx
 ; CHECK-NEXT:    movaps (%rcx), %xmm0
 ; CHECK-NEXT:    callq _sink

diff --git a/llvm/test/DebugInfo/AArch64/frameindices.ll b/llvm/test/DebugInfo/AArch64/frameindices.ll
index b53fbf6fd0883..8f736a07034cf 100644
--- a/llvm/test/DebugInfo/AArch64/frameindices.ll
+++ b/llvm/test/DebugInfo/AArch64/frameindices.ll
@@ -86,7 +86,7 @@ entry:
 define void @_Z3f16v() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !68 {
 entry:
   %agg.tmp.i.i = alloca %struct.A, align 8
-  %d = alloca %struct.B, align 1
+  %d = alloca %struct.B, align 8
   %agg.tmp.sroa.2 = alloca [15 x i8], align 1
   %agg.tmp.sroa.4 = alloca [7 x i8], align 1
   tail call void @llvm.dbg.declare(metadata [15 x i8]* %agg.tmp.sroa.2, metadata !56, metadata !74), !dbg !75

diff --git a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
index 65feae836ea7a..8d7608ead38ed 100644
--- a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
+++ b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
@@ -221,7 +221,7 @@
 ; Function Attrs: noinline nounwind uwtable
 define void @use_dbg_declare() #0 !dbg !7 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.declare(metadata ptr %o, metadata !10, metadata !15), !dbg !16
   call void @escape_foo(ptr %o), !dbg !17
   ret void, !dbg !18

diff --git a/llvm/test/DebugInfo/X86/dbg-addr.ll b/llvm/test/DebugInfo/X86/dbg-addr.ll
index 0c6a54a65f3a4..d6c8e4bf1ce2e 100644
--- a/llvm/test/DebugInfo/X86/dbg-addr.ll
+++ b/llvm/test/DebugInfo/X86/dbg-addr.ll
@@ -44,7 +44,7 @@ target triple = "x86_64--linux"
 ; Function Attrs: noinline nounwind uwtable
 define void @use_dbg_addr() #0 !dbg !7 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.addr(metadata ptr %o, metadata !10, metadata !15), !dbg !16
   call void @escape_foo(ptr %o), !dbg !17
   ret void, !dbg !18
@@ -52,7 +52,7 @@ entry:
 
 define void @test_dbg_addr_and_dbg_val_undef() #0 !dbg !117 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.addr(metadata ptr %o, metadata !1110, metadata !1115), !dbg !1116
   call void @escape_foo(ptr %o), !dbg !1117
   call void @llvm.dbg.value(metadata ptr undef, metadata !1110, metadata !1115), !dbg !1116

diff --git a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
index 7fd345e322b62..40188f505b182 100644
--- a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
+++ b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
@@ -23,7 +23,7 @@ target triple = "x86_64--linux"
 ; Function Attrs: noinline nounwind uwtable
 define void @use_dbg_declare() #0 !dbg !7 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.declare(metadata ptr %o, metadata !10, metadata !15), !dbg !16
   call void @escape_foo(ptr %o), !dbg !17
   ret void, !dbg !18

diff --git a/llvm/test/DebugInfo/X86/sret.ll b/llvm/test/DebugInfo/X86/sret.ll
index 644f0c6bcd25f..017fc968d4276 100644
--- a/llvm/test/DebugInfo/X86/sret.ll
+++ b/llvm/test/DebugInfo/X86/sret.ll
@@ -102,7 +102,7 @@ entry:
 define void @_ZN1B9AInstanceEv(ptr noalias sret(%class.A) %agg.result, ptr %this) #2 align 2 !dbg !53 {
 entry:
   %this.addr = alloca ptr, align 8
-  %nrvo = alloca i1
+  %nrvo = alloca i1, align 1
   %cleanup.dest.slot = alloca i32
   store ptr %this, ptr %this.addr, align 8
   call void @llvm.dbg.declare(metadata ptr %this.addr, metadata !89, metadata !DIExpression()), !dbg !91
@@ -139,7 +139,7 @@ entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
   %argv.addr = alloca ptr, align 8
-  %b = alloca %class.B, align 1
+  %b = alloca %class.B, align 8
   %return_val = alloca i32, align 4
   %temp.lvalue = alloca %class.A, align 8
   %exn.slot = alloca ptr
@@ -226,7 +226,7 @@ define linkonce_odr void @_ZN1AD0Ev(ptr %this) unnamed_addr #2 align 2 personali
 entry:
   %this.addr = alloca ptr, align 8
   %exn.slot = alloca ptr
-  %ehselector.slot = alloca i32
+  %ehselector.slot = alloca i32, align 4
   store ptr %this, ptr %this.addr, align 8
   call void @llvm.dbg.declare(metadata ptr %this.addr, metadata !126, metadata !DIExpression()), !dbg !127
   %this1 = load ptr, ptr %this.addr

diff --git a/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll b/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
index 65a6d3edbf622..989abb363c501 100644
--- a/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
+++ b/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
@@ -86,17 +86,17 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define dso_local noundef i32 @_Z3funii(i32 noundef %a, i32 noundef %b) local_unnamed_addr #0 !dbg !17 {
 entry:
-  %a.addr = alloca i64, align 4, !DIAssignID !58 ; VAR:a
+  %a.addr = alloca i64, align 8, !DIAssignID !58 ; VAR:a
   call void @llvm.dbg.assign(metadata i1 undef, metadata !21, metadata !DIExpression(), metadata !58, metadata ptr %a.addr, metadata !DIExpression()), !dbg !27 ; VAR:a
-  %b.addr = alloca i64, align 4, !DIAssignID !64 ; VAR:b
+  %b.addr = alloca i64, align 8, !DIAssignID !64 ; VAR:b
   call void @llvm.dbg.assign(metadata i1 undef, metadata !22, metadata !DIExpression(), metadata !64, metadata ptr %b.addr, metadata !DIExpression()), !dbg !27 ; VAR:b
-  %c.addr = alloca i64, align 4, !DIAssignID !68 ; VAR:c
+  %c.addr = alloca i64, align 8, !DIAssignID !68 ; VAR:c
   call void @llvm.dbg.assign(metadata i1 undef, metadata !67, metadata !DIExpression(), metadata !68, metadata ptr %c.addr, metadata !DIExpression()), !dbg !27 ; VAR:c
-  %d.addr = alloca i64, align 4, !DIAssignID !73 ; VAR:d
+  %d.addr = alloca i64, align 8, !DIAssignID !73 ; VAR:d
   call void @llvm.dbg.assign(metadata i1 undef, metadata !72, metadata !DIExpression(), metadata !73, metadata ptr %d.addr, metadata !DIExpression()), !dbg !27 ; VAR:d
-  %e.addr = alloca i64, align 4, !DIAssignID !76 ; VAR:e
+  %e.addr = alloca i64, align 8, !DIAssignID !76 ; VAR:e
   call void @llvm.dbg.assign(metadata i1 undef, metadata !75, metadata !DIExpression(), metadata !76, metadata ptr %e.addr, metadata !DIExpression()), !dbg !27 ; VAR:e
-  ;%f.addr = alloca i64, align 4, !DIAssignID !80 ; VAR:f
+  ;%f.addr = alloca i64, align 8, !DIAssignID !80 ; VAR:f
   ;call void @llvm.dbg.assign(metadata i1 undef, metadata !79, metadata !DIExpression(), metadata !80, metadata ptr %f.addr, metadata !DIExpression()), !dbg !27 ; VAR:f
   store i64 1, ptr %a.addr, !DIAssignID !70 ; VAR:a
   call void @llvm.dbg.assign(metadata i64 1, metadata !21, metadata !DIExpression(), metadata !70, metadata ptr %a.addr, metadata !DIExpression()), !dbg !27 ; VAR:a

