[llvm] 38f1abe - Revert "[SelectionDAG] Do not second-guess alignment for alloca"
Ron Lieberman via llvm-commits
llvm-commits@lists.llvm.org
Thu Dec 15 08:55:38 PST 2022
Author: Ron Lieberman
Date: 2022-12-15T10:55:18-06:00
New Revision: 38f1abef860418f85b1011601d20e9d49aa85adb
URL: https://github.com/llvm/llvm-project/commit/38f1abef860418f85b1011601d20e9d49aa85adb
DIFF: https://github.com/llvm/llvm-project/commit/38f1abef860418f85b1011601d20e9d49aa85adb.diff
LOG: Revert "[SelectionDAG] Do not second-guess alignment for alloca"
Breaks amdgpu buildbot https://lab.llvm.org/buildbot/#/builders/193/builds/23491
This reverts commit ffedf47d8b793e07317f82f9c2a5f5425ebb71ad.
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
llvm/test/CodeGen/AArch64/preferred-alignment.ll
llvm/test/CodeGen/AArch64/seh-finally.ll
llvm/test/CodeGen/AMDGPU/call-argument-types.ll
llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
llvm/test/CodeGen/ARM/ssp-data-layout.ll
llvm/test/CodeGen/BPF/pr57872.ll
llvm/test/CodeGen/BPF/undef.ll
llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
llvm/test/CodeGen/Mips/atomic64.ll
llvm/test/CodeGen/Mips/cconv/byval.ll
llvm/test/CodeGen/Mips/cconv/return-struct.ll
llvm/test/CodeGen/Mips/largeimmprinting.ll
llvm/test/CodeGen/Mips/o32_cc_byval.ll
llvm/test/CodeGen/NVPTX/lower-byval-args.ll
llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
llvm/test/CodeGen/PowerPC/aix-sret-param.ll
llvm/test/CodeGen/PowerPC/byval.ll
llvm/test/CodeGen/PowerPC/structsinregs.ll
llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
llvm/test/CodeGen/RISCV/frame.ll
llvm/test/CodeGen/RISCV/mem64.ll
llvm/test/CodeGen/RISCV/vararg.ll
llvm/test/CodeGen/Thumb2/mve-stack.ll
llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
llvm/test/CodeGen/VE/Scalar/atomic_load.ll
llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
llvm/test/CodeGen/WebAssembly/PR40172.ll
llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
llvm/test/CodeGen/X86/fast-isel-call.ll
llvm/test/CodeGen/X86/load-local-v3i129.ll
llvm/test/CodeGen/X86/pr44140.ll
llvm/test/CodeGen/X86/ssp-data-layout.ll
llvm/test/CodeGen/X86/win-cleanuppad.ll
llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
llvm/test/DebugInfo/AArch64/frameindices.ll
llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
llvm/test/DebugInfo/X86/dbg-addr.ll
llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
llvm/test/DebugInfo/X86/sret.ll
llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 3a1a23713f450..3e59d0d2b753d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -128,7 +128,20 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
for (const Instruction &I : BB) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
Type *Ty = AI->getAllocatedType();
- Align Alignment = AI->getAlign();
+ Align TyPrefAlign = MF->getDataLayout().getPrefTypeAlign(Ty);
+ // The "specified" alignment is the alignment written on the alloca,
+ // or the preferred alignment of the type if none is specified.
+ //
+ // (Unspecified alignment on allocas will be going away soon.)
+ Align SpecifiedAlign = AI->getAlign();
+
+ // If the preferred alignment of the type is higher than the specified
+ // alignment of the alloca, promote the alignment, as long as it doesn't
+ // require realigning the stack.
+ //
+ // FIXME: Do we really want to second-guess the IR in isel?
+ Align Alignment =
+ std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign);
// Static allocas can be folded into the initial stack frame
// adjustment. For targets that don't realign the stack, don't
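[Editor's note on the restored hunk above: it promotes an alloca's alignment to the type's preferred alignment, but caps the promotion at the target's stack alignment (so the frame never needs realigning) and never drops below the alignment written in the IR. A minimal standalone sketch of that clamp, using plain byte counts instead of llvm::Align; the function name and parameters below are illustrative, not from the patch:

    #include <algorithm>
    #include <cstdint>

    // Illustrative stand-ins for llvm::Align values, in bytes.
    uint64_t promoteAllocaAlign(uint64_t TyPrefAlign,    // preferred align of the allocated type
                                uint64_t StackAlign,     // natural stack alignment of the target
                                uint64_t SpecifiedAlign) // alignment written on the alloca
    {
      // Raise to the type's preferred alignment, capped at the stack
      // alignment so no realignment is forced, but never go below
      // what the IR specified.
      return std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign);
    }

    // Example: an i64 alloca annotated `align 4` on a target whose stack
    // is 16-byte aligned gets promoted: promoteAllocaAlign(8, 16, 4) == 8.

This second-guessing is what the reverted change had removed, which is why the test updates below flip so many alloca alignments back.]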
diff --git a/llvm/test/CodeGen/AArch64/preferred-alignment.ll b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
index c430b08a62ad0..ffff7e1d02fb3 100644
--- a/llvm/test/CodeGen/AArch64/preferred-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
@@ -3,33 +3,13 @@
; Function Attrs: nounwind
define i32 @foo() #0 {
entry:
- %c = alloca i8
+ %c = alloca i8, align 1
; CHECK: add x0, sp, #12
- %s = alloca i16
-; CHECK-NEXT: add x1, sp, #8
- %i = alloca i32
-; CHECK-NEXT: add x2, sp, #4
- %call = call i32 @baz(i8* %c, i16* %s, i32* %i)
- %0 = load i8, i8* %c, align 1
- %conv = zext i8 %0 to i32
- %add = add nsw i32 %call, %conv
- %1 = load i16, i16* %s, align 2
- %conv1 = sext i16 %1 to i32
- %add2 = add nsw i32 %add, %conv1
- %2 = load i32, i32* %i, align 4
- %add3 = add nsw i32 %add2, %2
- ret i32 %add3
-}
-
-define i32 @bar() #0 {
-entry:
- %c = alloca i8, align 4
-; CHECK: add x0, sp, #12
- %s = alloca i16, align 4
+ %s = alloca i16, align 2
; CHECK-NEXT: add x1, sp, #8
%i = alloca i32, align 4
; CHECK-NEXT: add x2, sp, #4
- %call = call i32 @baz(i8* %c, i16* %s, i32* %i)
+ %call = call i32 @bar(i8* %c, i16* %s, i32* %i)
%0 = load i8, i8* %c, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %call, %conv
@@ -41,7 +21,7 @@ entry:
ret i32 %add3
}
-declare i32 @baz(i8*, i16*, i32*) #1
+declare i32 @bar(i8*, i16*, i32*) #1
attributes #0 = { nounwind "frame-pointer"="none" }
attributes #1 = { "frame-pointer"="none" }
diff --git a/llvm/test/CodeGen/AArch64/seh-finally.ll b/llvm/test/CodeGen/AArch64/seh-finally.ll
index 4cf7c535de78d..72487e5bf4d7a 100644
--- a/llvm/test/CodeGen/AArch64/seh-finally.ll
+++ b/llvm/test/CodeGen/AArch64/seh-finally.ll
@@ -42,7 +42,7 @@ entry:
; CHECK: ldur w0, [x29, #-8]
; CHECK: bl foo
- %o = alloca %struct.S, align 8
+ %o = alloca %struct.S, align 4
call void (...) @llvm.localescape(%struct.S* %o)
%x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
%0 = load i32, i32* %x, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index 1d00ab14c0d85..6f16dcb3c47da 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -671,7 +671,7 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
; GCN-NEXT: s_swappc_b64
; GCN-NOT: [[SP]]
define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0 {
- %val = alloca { i8, i32 }, align 8, addrspace(5)
+ %val = alloca { i8, i32 }, align 4, addrspace(5)
%gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %val, i32 0, i32 0
%gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %val, i32 0, i32 1
store i8 3, ptr addrspace(5) %gep0
@@ -702,8 +702,8 @@ define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0
; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off
; GCN: buffer_store_dword [[LOAD_OUT_VAL1]], off
define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval_struct_i8_i32(i32) #0 {
- %in.val = alloca { i8, i32 }, align 8, addrspace(5)
- %out.val = alloca { i8, i32 }, align 8, addrspace(5)
+ %in.val = alloca { i8, i32 }, align 4, addrspace(5)
+ %out.val = alloca { i8, i32 }, align 4, addrspace(5)
%in.gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %in.val, i32 0, i32 0
%in.gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %in.val, i32 0, i32 1
store i8 3, ptr addrspace(5) %in.gep0
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index 7b42f22f454f4..77fe9d3817c61 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -291,7 +291,7 @@ bb5:
; GCN: ds_write_b32 v{{[0-9]+}}, [[PTR]]
define void @alloca_ptr_nonentry_block(i32 %arg0) #0 {
- %alloca0 = alloca { i8, i32 }, align 8, addrspace(5)
+ %alloca0 = alloca { i8, i32 }, align 4, addrspace(5)
%cmp = icmp eq i32 %arg0, 0
br i1 %cmp, label %bb, label %ret
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
index 4a95474ffef26..ab8efa9f21a0b 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -11098,7 +11098,7 @@ entry:
%tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
; allocate enough scratch to go beyond 2^12 addressing
- %scratch = alloca <1280 x i32>, align 16, addrspace(5)
+ %scratch = alloca <1280 x i32>, align 8, addrspace(5)
; load VGPR data
%aptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %in, i32 %tid
diff --git a/llvm/test/CodeGen/ARM/ssp-data-layout.ll b/llvm/test/CodeGen/ARM/ssp-data-layout.ll
index cb3978e57ff43..ccae951d5c6cf 100644
--- a/llvm/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/ARM/ssp-data-layout.ll
@@ -452,8 +452,8 @@ entry:
; CHECK: bl get_struct_large_char2
; CHECK: strb r0, [sp, #106]
; CHECK: bl end_struct_large_char2
- %a = alloca %struct.struct_small_char, align 4
- %b = alloca %struct.struct_large_char2, align 4
+ %a = alloca %struct.struct_small_char, align 1
+ %b = alloca %struct.struct_large_char2, align 1
%d1 = alloca %struct.struct_large_nonchar, align 8
%d2 = alloca %struct.struct_small_nonchar, align 2
%call = call signext i8 @get_struct_small_char()
diff --git a/llvm/test/CodeGen/BPF/pr57872.ll b/llvm/test/CodeGen/BPF/pr57872.ll
index 34f9975d6f28b..a9162496c9c15 100644
--- a/llvm/test/CodeGen/BPF/pr57872.ll
+++ b/llvm/test/CodeGen/BPF/pr57872.ll
@@ -180,7 +180,7 @@ define void @foo(ptr %g) {
; CHECK-NEXT: call bar
; CHECK-NEXT: exit
entry:
- %event = alloca %struct.event, align 8
+ %event = alloca %struct.event, align 1
%hostname = getelementptr inbounds %struct.event, ptr %event, i64 0, i32 1
%0 = load ptr, ptr %g, align 8
call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(84) %hostname, ptr noundef nonnull align 1 dereferenceable(84) %0, i64 84, i1 false)
diff --git a/llvm/test/CodeGen/BPF/undef.ll b/llvm/test/CodeGen/BPF/undef.ll
index 1f1a246fac525..099c2f8ac7649 100644
--- a/llvm/test/CodeGen/BPF/undef.ll
+++ b/llvm/test/CodeGen/BPF/undef.ll
@@ -40,7 +40,7 @@ define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 s
; CHECK: r1 = routing
; CHECK: call bpf_map_lookup_elem
; CHECK: exit
- %key = alloca %struct.routing_key_2, align 8
+ %key = alloca %struct.routing_key_2, align 1
%1 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 0
store i8 5, i8* %1, align 1
%2 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 1
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
index 86a55a0af49f2..a7a537e68a7eb 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
@@ -10,8 +10,8 @@ entry:
; CHECK-LABEL: foobar:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
- %a = alloca %struct.x, align 8
- %c = alloca %struct.x*, align 8
+ %a = alloca %struct.x, align 4
+ %c = alloca %struct.x*, align 4
store i32 %x, i32* %x.addr, align 4
%x1 = getelementptr inbounds %struct.x, %struct.x* %a, i32 0, i32 0
%0 = load i32, i32* %x.addr, align 4
diff --git a/llvm/test/CodeGen/Mips/atomic64.ll b/llvm/test/CodeGen/Mips/atomic64.ll
index e8e124c4b88ac..d27c9ac42e059 100644
--- a/llvm/test/CodeGen/Mips/atomic64.ll
+++ b/llvm/test/CodeGen/Mips/atomic64.ll
@@ -1145,7 +1145,7 @@ define i64 @AtomicSwap64(i64 signext %newval) nounwind {
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: daddiu $sp, $sp, 16
entry:
- %newval.addr = alloca i64, align 8
+ %newval.addr = alloca i64, align 4
store i64 %newval, i64* %newval.addr, align 4
%tmp = load i64, i64* %newval.addr, align 4
%0 = atomicrmw xchg i64* @x, i64 %tmp monotonic
@@ -1359,7 +1359,7 @@ define i64 @AtomicCmpSwap64(i64 signext %oldval, i64 signext %newval) nounwind {
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: daddiu $sp, $sp, 16
entry:
- %newval.addr = alloca i64, align 8
+ %newval.addr = alloca i64, align 4
store i64 %newval, i64* %newval.addr, align 4
%tmp = load i64, i64* %newval.addr, align 4
%0 = cmpxchg i64* @x, i64 %oldval, i64 %tmp monotonic monotonic
diff --git a/llvm/test/CodeGen/Mips/cconv/byval.ll b/llvm/test/CodeGen/Mips/cconv/byval.ll
index 5a390feaca059..5d77107d5966a 100644
--- a/llvm/test/CodeGen/Mips/cconv/byval.ll
+++ b/llvm/test/CodeGen/Mips/cconv/byval.ll
@@ -151,7 +151,7 @@ define dso_local void @g() #0 {
; N64-NEXT: jr $ra
; N64-NEXT: daddu $sp, $sp, $1
entry:
- %a = alloca %struct.S1, align 8
+ %a = alloca %struct.S1, align 4
call void @f2(%struct.S1* byval(%struct.S1) align 4 %a)
ret void
}
@@ -340,8 +340,8 @@ define dso_local void @g2(%struct.S1* %a) {
; N64-NEXT: jr $ra
; N64-NEXT: daddu $sp, $sp, $1
entry:
- %a.addr = alloca %struct.S1*
- %byval-temp = alloca %struct.S1, align 8
+ %a.addr = alloca %struct.S1*, align 4
+ %byval-temp = alloca %struct.S1, align 4
store %struct.S1* %a, %struct.S1** %a.addr, align 4
%0 = load %struct.S1*, %struct.S1** %a.addr, align 4
%1 = bitcast %struct.S1* %byval-temp to i8*
@@ -412,8 +412,8 @@ define dso_local i32 @g3(%struct.S1* %a, %struct.S1* %b) #0 {
; N64-NEXT: jr $ra
; N64-NEXT: daddiu $sp, $sp, 32
entry:
- %a.addr = alloca %struct.S1*
- %b.addr = alloca %struct.S1*
+ %a.addr = alloca %struct.S1*, align 4
+ %b.addr = alloca %struct.S1*, align 4
store %struct.S1* %a, %struct.S1** %a.addr, align 4
store %struct.S1* %b, %struct.S1** %b.addr, align 4
%0 = load %struct.S1*, %struct.S1** %a.addr, align 4
diff --git a/llvm/test/CodeGen/Mips/cconv/return-struct.ll b/llvm/test/CodeGen/Mips/cconv/return-struct.ll
index 99a0e08b947ea..49964d69a70d5 100644
--- a/llvm/test/CodeGen/Mips/cconv/return-struct.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-struct.ll
@@ -139,7 +139,7 @@ define inreg {i16} @ret_struct_i16() nounwind {
; N64-LE-NEXT: jr $ra
; N64-LE-NEXT: daddiu $sp, $sp, 16
entry:
- %retval = alloca {i8,i8}, align 8
+ %retval = alloca {i8,i8}, align 1
%0 = bitcast {i8,i8}* %retval to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}, {i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i1 false)
%1 = bitcast {i8,i8}* %retval to {i16}*
diff --git a/llvm/test/CodeGen/Mips/largeimmprinting.ll b/llvm/test/CodeGen/Mips/largeimmprinting.ll
index edece72c2c358..1d5b9c47b7df2 100644
--- a/llvm/test/CodeGen/Mips/largeimmprinting.ll
+++ b/llvm/test/CodeGen/Mips/largeimmprinting.ll
@@ -24,7 +24,7 @@ entry:
; 64: daddu $[[R1]], $sp, $[[R1]]
; 64: sd $ra, 24($[[R1]])
- %agg.tmp = alloca %struct.S1, align 8
+ %agg.tmp = alloca %struct.S1, align 1
%tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp, i8* align 1 getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i1 false)
call void @f2(%struct.S1* byval(%struct.S1) %agg.tmp) nounwind
diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
index 3559774686c18..de17a1e502f43 100644
--- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
@@ -80,7 +80,7 @@ define void @f1() nounwind {
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 64
entry:
- %agg.tmp10 = alloca %struct.S3, align 8
+ %agg.tmp10 = alloca %struct.S3, align 4
call void @callee1(float 2.000000e+01, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
call void @callee2(%struct.S2* byval(%struct.S2) @f1.s2) nounwind
%tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0
diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
index b1a45b90412f0..95dc45a039914 100644
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -120,7 +120,7 @@ bb:
; Verify that if the pointer escapes, then we do fall back onto using a temp copy.
; CHECK-LABEL: .visible .entry pointer_escapes
-; CHECK: .local .align 4 .b8 __local_depot{{.*}}
+; CHECK: .local .align 8 .b8 __local_depot{{.*}}
; CHECK64: ld.param.u64 [[result_addr:%rd[0-9]+]], [{{.*}}_param_0]
; CHECK64: add.u64 %[[copy_addr:rd[0-9]+]], %SPL, 0;
; CHECK32: ld.param.u32 [[result_addr:%r[0-9]+]], [{{.*}}_param_0]
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
index 508dd633d3750..e5a816feff441 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
@@ -353,7 +353,7 @@ entry:
define void @call_test_byval_4Byte() {
entry:
%s0 = alloca %struct.S0, align 8
- %s4a = alloca %struct.S4A, align 8
+ %s4a = alloca %struct.S4A, align 4
%call = call signext i32 @test_byval_4Byte(ptr byval(%struct.S4) align 1 @gS4, ptr byval(%struct.S0) align 1 %s0, ptr byval(%struct.S4A) align 4 %s4a)
ret void
}
@@ -945,7 +945,7 @@ entry:
define i32 @call_test_byval_homogeneous_float_struct() {
entry:
- %s = alloca %struct.F, align 8
+ %s = alloca %struct.F, align 4
call void @llvm.memset.p0.i32(ptr align 4 %s, i8 0, i32 12, i1 false)
%call = call i32 @test_byval_homogeneous_float_struct(ptr byval(%struct.F) align 4 %s)
ret i32 %call
diff --git a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
index 10dbd0618e545..3c40fc3c3e881 100644
--- a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
@@ -17,7 +17,7 @@
define void @test1() {
entry:
- %s = alloca %struct.S, align 8
+ %s = alloca %struct.S, align 4
call void @foo(ptr sret(%struct.S) %s)
ret void
}
diff --git a/llvm/test/CodeGen/PowerPC/byval.ll b/llvm/test/CodeGen/PowerPC/byval.ll
index 24fefbd0d2281..8688f5a8993ef 100644
--- a/llvm/test/CodeGen/PowerPC/byval.ll
+++ b/llvm/test/CodeGen/PowerPC/byval.ll
@@ -34,7 +34,7 @@ define dso_local i32 @bar() {
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %x = alloca %struct, align 8
+ %x = alloca %struct, align 4
call void @foo(ptr %x)
%r = call i32 @foo1(ptr byval(%struct) %x)
ret i32 %r
diff --git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll
index 884aa39c7caa3..eb804c93bf1ef 100644
--- a/llvm/test/CodeGen/PowerPC/structsinregs.ll
+++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll
@@ -35,13 +35,13 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @caller1() nounwind {
entry:
- %p1 = alloca %struct.s1
- %p2 = alloca %struct.s2
- %p3 = alloca %struct.s3
- %p4 = alloca %struct.s4
- %p5 = alloca %struct.s5
- %p6 = alloca %struct.s6
- %p7 = alloca %struct.s7
+ %p1 = alloca %struct.s1, align 1
+ %p2 = alloca %struct.s2, align 2
+ %p3 = alloca %struct.s3, align 2
+ %p4 = alloca %struct.s4, align 4
+ %p5 = alloca %struct.s5, align 4
+ %p6 = alloca %struct.s6, align 4
+ %p7 = alloca %struct.s7, align 4
call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
@@ -103,13 +103,13 @@ entry:
define i32 @caller2() nounwind {
entry:
- %p1 = alloca %struct.t1
- %p2 = alloca %struct.t2
- %p3 = alloca %struct.t3
- %p4 = alloca %struct.t4
- %p5 = alloca %struct.t5
- %p6 = alloca %struct.t6
- %p7 = alloca %struct.t7
+ %p1 = alloca %struct.t1, align 1
+ %p2 = alloca %struct.t2, align 1
+ %p3 = alloca %struct.t3, align 1
+ %p4 = alloca %struct.t4, align 1
+ %p5 = alloca %struct.t5, align 1
+ %p6 = alloca %struct.t6, align 1
+ %p7 = alloca %struct.t7, align 1
call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)
diff --git a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
index cdd2e904da44b..37ef8ec8d7ad2 100644
--- a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
+++ b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
@@ -7,7 +7,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @foo(float inreg %s.coerce) nounwind {
entry:
- %s = alloca %struct.Sf1, align 8
+ %s = alloca %struct.Sf1, align 4
store float %s.coerce, ptr %s, align 1
%0 = load float, ptr %s, align 1
call void (i32, ...) @testvaSf1(i32 1, float inreg %0)
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 8a1d93ba6714d..979690a46751c 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -595,7 +595,7 @@ define i32 @caller_large_struct() nounwind {
; RV32I-WITHFP-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 48
; RV32I-WITHFP-NEXT: ret
- %ls = alloca %struct.large, align 8
+ %ls = alloca %struct.large, align 4
%1 = bitcast %struct.large* %ls to i8*
%a = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 0
store i32 1, i32* %a
diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
index e032c0a426595..6f24bea9d3523 100644
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -41,7 +41,7 @@ define i32 @test() nounwind {
; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 32
; RV32I-WITHFP-NEXT: ret
- %key = alloca %struct.key_t, align 8
+ %key = alloca %struct.key_t, align 4
%1 = bitcast %struct.key_t* %key to i8*
call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 20, i1 false)
%2 = getelementptr inbounds %struct.key_t, %struct.key_t* %key, i64 0, i32 1, i64 0
diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index 5009f1769a957..e1339e8ddd385 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -368,7 +368,7 @@ define void @addi_fold_crash(i64 %arg) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
bb:
- %tmp = alloca %struct.quux, align 8
+ %tmp = alloca %struct.quux, align 4
%tmp1 = getelementptr inbounds %struct.quux, %struct.quux* %tmp, i64 0, i32 1
%tmp2 = getelementptr inbounds %struct.quux, %struct.quux* %tmp, i64 0, i32 1, i64 %arg
store i8 0, i8* %tmp2, align 1
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index e2ea35722130d..d7b5ec01023da 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -138,7 +138,7 @@ define i32 @va1(i8* %fmt, ...) {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%argp.cur = load i8*, i8** %va, align 4
@@ -603,7 +603,7 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%2 = bitcast i8** %va to i32*
@@ -725,7 +725,7 @@ define i64 @va2_va_arg(i8 *%fmt, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%2 = va_arg i8** %va, double
@@ -923,7 +923,7 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%2 = bitcast i8** %va to i32*
@@ -1050,7 +1050,7 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%2 = va_arg i8** %va, double
@@ -1351,8 +1351,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 112
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %vargs = alloca i8*
- %wargs = alloca i8*
+ %vargs = alloca i8*, align 4
+ %wargs = alloca i8*, align 4
%1 = bitcast i8** %vargs to i8*
%2 = bitcast i8** %wargs to i8*
call void @llvm.va_start(i8* %1)
@@ -1672,7 +1672,7 @@ define i32 @va6_no_fixed_args(...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%2 = va_arg i8** %va, i32
@@ -1862,7 +1862,7 @@ define i32 @va_large_stack(i8* %fmt, ...) {
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 2032
; LP64-LP64F-LP64D-WITHFP-NEXT: ret
%large = alloca [ 100000000 x i8 ]
- %va = alloca i8*
+ %va = alloca i8*, align 4
%1 = bitcast i8** %va to i8*
call void @llvm.va_start(i8* %1)
%argp.cur = load i8*, i8** %va, align 4
diff --git a/llvm/test/CodeGen/Thumb2/mve-stack.ll b/llvm/test/CodeGen/Thumb2/mve-stack.ll
index 91c33572549ab..ea272e19b23fc 100644
--- a/llvm/test/CodeGen/Thumb2/mve-stack.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-stack.ll
@@ -15,7 +15,7 @@ define arm_aapcs_vfpcc void @vstrw32() {
; CHECK-NEXT: add sp, #16
; CHECK-NEXT: pop {r7, pc}
entry:
- %d = alloca [4 x i32], align 4
+ %d = alloca [4 x i32], align 2
%g = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 2
%b = bitcast i32* %g to <4 x i32>*
store <4 x i32> zeroinitializer, <4 x i32>* %b, align 2
@@ -153,7 +153,7 @@ define arm_aapcs_vfpcc <4 x i32> @vldrw32() {
; CHECK-NEXT: add sp, #16
; CHECK-NEXT: pop {r7, pc}
entry:
- %d = alloca [4 x i32], align 4
+ %d = alloca [4 x i32], align 2
%arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 0
call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i32*)*)(i32* %arraydecay)
%g = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 2
diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
index 08e4c0c04102c..17d2e49ef71e1 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
@@ -1455,7 +1455,7 @@ define zeroext i1 @_Z30atomic_cmp_swap_relaxed_stk_i1Rbb(i8* nocapture nonnull a
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic", align 8
+ %3 = alloca %"struct.std::__1::atomic", align 1
%4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %3, i64 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4)
%5 = zext i1 %1 to i8
@@ -1515,7 +1515,7 @@ define signext i8 @_Z30atomic_cmp_swap_relaxed_stk_i8Rcc(i8* nocapture nonnull a
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic.0", align 8
+ %3 = alloca %"struct.std::__1::atomic.0", align 1
%4 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4)
%5 = load i8, i8* %0, align 1
@@ -1568,7 +1568,7 @@ define zeroext i8 @_Z30atomic_cmp_swap_relaxed_stk_u8Rhh(i8* nocapture nonnull a
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic.5", align 8
+ %3 = alloca %"struct.std::__1::atomic.5", align 1
%4 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4)
%5 = load i8, i8* %0, align 1
@@ -1622,7 +1622,7 @@ define signext i16 @_Z31atomic_cmp_swap_relaxed_stk_i16Rss(i16* nocapture nonnul
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic.10", align 8
+ %3 = alloca %"struct.std::__1::atomic.10", align 2
%4 = bitcast %"struct.std::__1::atomic.10"* %3 to i8*
call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %4)
%5 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
@@ -1676,7 +1676,7 @@ define zeroext i16 @_Z31atomic_cmp_swap_relaxed_stk_u16Rtt(i16* nocapture nonnul
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic.15", align 8
+ %3 = alloca %"struct.std::__1::atomic.15", align 2
%4 = bitcast %"struct.std::__1::atomic.15"* %3 to i8*
call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %4)
%5 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
@@ -1724,7 +1724,7 @@ define signext i32 @_Z31atomic_cmp_swap_relaxed_stk_i32Rii(i32* nocapture nonnul
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic.20", align 8
+ %3 = alloca %"struct.std::__1::atomic.20", align 4
%4 = bitcast %"struct.std::__1::atomic.20"* %3 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4)
%5 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
@@ -1772,7 +1772,7 @@ define zeroext i32 @_Z31atomic_cmp_swap_relaxed_stk_u32Rjj(i32* nocapture nonnul
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %3 = alloca %"struct.std::__1::atomic.25", align 8
+ %3 = alloca %"struct.std::__1::atomic.25", align 4
%4 = bitcast %"struct.std::__1::atomic.25"* %3 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4)
%5 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
index dbb993fb2aa11..a9d3472c3d263 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
@@ -560,7 +560,7 @@ define zeroext i1 @_Z26atomic_load_relaxed_stk_i1v() {
; CHECK-NEXT: ld1b.zx %s0, 248(, %s11)
; CHECK-NEXT: and %s0, 1, %s0
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic", align 8
+ %1 = alloca %"struct.std::__1::atomic", align 1
%2 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %1, i64 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
call void @_Z6fun_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nonnull align 1 dereferenceable(1) %1)
@@ -590,7 +590,7 @@ define signext i8 @_Z26atomic_load_relaxed_stk_i8v() {
; CHECK-NEXT: bsic %s10, (, %s12)
; CHECK-NEXT: ld1b.sx %s0, 248(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic.0", align 8
+ %1 = alloca %"struct.std::__1::atomic.0", align 1
%2 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
call void @_Z6fun_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nonnull align 1 dereferenceable(1) %1)
@@ -612,7 +612,7 @@ define zeroext i8 @_Z26atomic_load_relaxed_stk_u8v() {
; CHECK-NEXT: bsic %s10, (, %s12)
; CHECK-NEXT: ld1b.zx %s0, 248(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic.5", align 8
+ %1 = alloca %"struct.std::__1::atomic.5", align 1
%2 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
call void @_Z6fun_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nonnull align 1 dereferenceable(1) %1)
@@ -634,7 +634,7 @@ define signext i16 @_Z27atomic_load_relaxed_stk_i16v() {
; CHECK-NEXT: bsic %s10, (, %s12)
; CHECK-NEXT: ld2b.sx %s0, 248(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic.10", align 8
+ %1 = alloca %"struct.std::__1::atomic.10", align 2
%2 = bitcast %"struct.std::__1::atomic.10"* %1 to i8*
call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %2)
call void @_Z7fun_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nonnull align 2 dereferenceable(2) %1)
@@ -657,7 +657,7 @@ define zeroext i16 @_Z27atomic_load_relaxed_stk_u16v() {
; CHECK-NEXT: bsic %s10, (, %s12)
; CHECK-NEXT: ld2b.zx %s0, 248(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic.15", align 8
+ %1 = alloca %"struct.std::__1::atomic.15", align 2
%2 = bitcast %"struct.std::__1::atomic.15"* %1 to i8*
call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %2)
call void @_Z7fun_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nonnull align 2 dereferenceable(2) %1)
@@ -680,7 +680,7 @@ define signext i32 @_Z27atomic_load_relaxed_stk_i32v() {
; CHECK-NEXT: bsic %s10, (, %s12)
; CHECK-NEXT: ldl.sx %s0, 248(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic.20", align 8
+ %1 = alloca %"struct.std::__1::atomic.20", align 4
%2 = bitcast %"struct.std::__1::atomic.20"* %1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
call void @_Z7fun_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nonnull align 4 dereferenceable(4) %1)
@@ -703,7 +703,7 @@ define zeroext i32 @_Z27atomic_load_relaxed_stk_u32v() {
; CHECK-NEXT: bsic %s10, (, %s12)
; CHECK-NEXT: ldl.zx %s0, 248(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
- %1 = alloca %"struct.std::__1::atomic.25", align 8
+ %1 = alloca %"struct.std::__1::atomic.25", align 4
%2 = bitcast %"struct.std::__1::atomic.25"* %1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
call void @_Z7fun_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nonnull align 4 dereferenceable(4) %1)
diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
index e48004073be4a..fb03fc92e933b 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
@@ -768,7 +768,7 @@ define zeroext i1 @_Z26atomic_swap_relaxed_stk_i1b(i1 zeroext %0) {
; CHECK-NEXT: and %s0, 1, %s0
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic", align 8
+ %2 = alloca %"struct.std::__1::atomic", align 1
%3 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %2, i64 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3)
%4 = zext i1 %0 to i8
@@ -797,7 +797,7 @@ define signext i8 @_Z26atomic_swap_relaxed_stk_i8c(i8 signext %0) {
; CHECK-NEXT: sra.l %s0, %s0, 56
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic.0", align 8
+ %2 = alloca %"struct.std::__1::atomic.0", align 1
%3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3)
%4 = atomicrmw volatile xchg i8* %3, i8 %0 monotonic
@@ -816,7 +816,7 @@ define zeroext i8 @_Z26atomic_swap_relaxed_stk_u8h(i8 zeroext %0) {
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic.5", align 8
+ %2 = alloca %"struct.std::__1::atomic.5", align 1
%3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3)
%4 = atomicrmw volatile xchg i8* %3, i8 %0 monotonic
@@ -836,7 +836,7 @@ define signext i16 @_Z27atomic_swap_relaxed_stk_i16s(i16 signext %0) {
; CHECK-NEXT: sra.l %s0, %s0, 48
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic.10", align 8
+ %2 = alloca %"struct.std::__1::atomic.10", align 2
%3 = bitcast %"struct.std::__1::atomic.10"* %2 to i8*
call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3)
%4 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
@@ -856,7 +856,7 @@ define zeroext i16 @_Z27atomic_swap_relaxed_stk_u16t(i16 zeroext %0) {
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic.15", align 8
+ %2 = alloca %"struct.std::__1::atomic.15", align 2
%3 = bitcast %"struct.std::__1::atomic.15"* %2 to i8*
call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3)
%4 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
@@ -873,7 +873,7 @@ define signext i32 @_Z27atomic_swap_relaxed_stk_i32i(i32 signext %0) {
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic.20", align 8
+ %2 = alloca %"struct.std::__1::atomic.20", align 4
%3 = bitcast %"struct.std::__1::atomic.20"* %2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3)
%4 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
@@ -890,7 +890,7 @@ define zeroext i32 @_Z27atomic_swap_relaxed_stk_u32j(i32 zeroext %0) {
; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1
; CHECK-NEXT: adds.l %s11, 16, %s11
; CHECK-NEXT: b.l.t (, %s10)
- %2 = alloca %"struct.std::__1::atomic.25", align 8
+ %2 = alloca %"struct.std::__1::atomic.25", align 4
%3 = bitcast %"struct.std::__1::atomic.25"* %2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3)
%4 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
diff --git a/llvm/test/CodeGen/WebAssembly/PR40172.ll b/llvm/test/CodeGen/WebAssembly/PR40172.ll
index 2585a86459e18..752f3e4dfbb2c 100644
--- a/llvm/test/CodeGen/WebAssembly/PR40172.ll
+++ b/llvm/test/CodeGen/WebAssembly/PR40172.ll
@@ -15,7 +15,7 @@ target triple = "wasm32-unknown-unknown"
; CHECK: i32.store8 8($[[BASE]]), $[[A1]]{{$}}
define void @test(i8 %byte) {
- %t = alloca { i8, i8 }, align 8
+ %t = alloca { i8, i8 }, align 1
%x4 = and i8 %byte, 1
%x5 = icmp eq i8 %x4, 1
%x6 = and i8 %byte, 2
diff --git a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
index deba5a8fe6727..c2f5ff9530ba9 100644
--- a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -50,8 +50,8 @@
; Function Attrs: uwtable
define void @_Z3barii(i32 %param1, i32 %param2) #0 !dbg !24 {
entry:
- %var1 = alloca %struct.AAA3, align 8
- %var2 = alloca %struct.AAA3, align 8
+ %var1 = alloca %struct.AAA3, align 1
+ %var2 = alloca %struct.AAA3, align 1
tail call void @llvm.dbg.value(metadata i32 %param1, i64 0, metadata !29, metadata !46), !dbg !47
tail call void @llvm.dbg.value(metadata i32 %param2, i64 0, metadata !30, metadata !46), !dbg !48
tail call void @llvm.dbg.value(metadata ptr null, i64 0, metadata !31, metadata !46), !dbg !49
diff --git a/llvm/test/CodeGen/X86/fast-isel-call.ll b/llvm/test/CodeGen/X86/fast-isel-call.ll
index a9d2bff923a44..9fec09818c743 100644
--- a/llvm/test/CodeGen/X86/fast-isel-call.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-call.ll
@@ -59,7 +59,7 @@ define void @test4(ptr %a, ptr %b) {
%struct.S = type { i8 }
define void @test5() {
entry:
- %s = alloca %struct.S, align 8
+ %s = alloca %struct.S, align 1
; CHECK-LABEL: test5:
; CHECK: subl $12, %esp
; CHECK: leal 8(%esp), %ecx
diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
index 8fa7ce0664537..090f8d972094c 100644
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -29,7 +29,7 @@ define void @_start() nounwind {
; SLOW-SHLD-NEXT: movq $-1, -48(%rsp)
; SLOW-SHLD-NEXT: retq
Entry:
- %y = alloca <3 x i129>, align 16
+ %y = alloca <3 x i129>, align 4
%L = load <3 x i129>, ptr %y
%I1 = insertelement <3 x i129> %L, i129 340282366920938463463374607431768211455, i32 1
store <3 x i129> %I1, ptr %y
diff --git a/llvm/test/CodeGen/X86/pr44140.ll b/llvm/test/CodeGen/X86/pr44140.ll
index a218c9d4dcea8..68ac3663a4cd8 100644
--- a/llvm/test/CodeGen/X86/pr44140.ll
+++ b/llvm/test/CodeGen/X86/pr44140.ll
@@ -59,7 +59,7 @@ start:
%dummy1 = alloca [22 x i64], align 8
%dummy2 = alloca [22 x i64], align 8
- %data = alloca <2 x i64>, align 16
+ %data = alloca <2 x i64>, align 8
br label %fake-loop
diff --git a/llvm/test/CodeGen/X86/ssp-data-layout.ll b/llvm/test/CodeGen/X86/ssp-data-layout.ll
index bda2598384db8..0a08582822e27 100644
--- a/llvm/test/CodeGen/X86/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/X86/ssp-data-layout.ll
@@ -93,14 +93,14 @@ entry:
%y = alloca i32, align 4
%z = alloca i32, align 4
%ptr = alloca i32, align 4
- %small2 = alloca [2 x i16], align 4
+ %small2 = alloca [2 x i16], align 2
%large2 = alloca [8 x i32], align 16
- %small = alloca [2 x i8], align 2
- %large = alloca [8 x i8], align 8
- %a = alloca %struct.struct_large_char, align 8
- %b = alloca %struct.struct_small_char, align 8
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
%c = alloca %struct.struct_large_nonchar, align 8
- %d = alloca %struct.struct_small_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
%call = call i32 @get_scalar1()
store i32 %call, ptr %x, align 4
call void @end_scalar1()
@@ -217,12 +217,12 @@ entry:
%ptr = alloca i32, align 4
%small2 = alloca [2 x i16], align 2
%large2 = alloca [8 x i32], align 16
- %small = alloca [2 x i8], align 2
- %large = alloca [8 x i8], align 8
- %a = alloca %struct.struct_large_char, align 8
- %b = alloca %struct.struct_small_char, align 8
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
%c = alloca %struct.struct_large_nonchar, align 8
- %d = alloca %struct.struct_small_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
%call = call i32 @get_scalar1()
store i32 %call, ptr %x, align 4
call void @end_scalar1()
@@ -325,14 +325,14 @@ entry:
%y = alloca i32, align 4
%z = alloca i32, align 4
%ptr = alloca i32, align 4
- %small2 = alloca [2 x i16], align 4
+ %small2 = alloca [2 x i16], align 2
%large2 = alloca [8 x i32], align 16
- %small = alloca [2 x i8], align 2
- %large = alloca [8 x i8], align 8
- %a = alloca %struct.struct_large_char, align 8
- %b = alloca %struct.struct_small_char, align 8
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
%c = alloca %struct.struct_large_nonchar, align 8
- %d = alloca %struct.struct_small_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
%call = call i32 @get_scalar1()
store i32 %call, ptr %x, align 4
call void @end_scalar1()
diff --git a/llvm/test/CodeGen/X86/win-cleanuppad.ll b/llvm/test/CodeGen/X86/win-cleanuppad.ll
index 452f0a8e36d8d..4d420f5fc3cd1 100644
--- a/llvm/test/CodeGen/X86/win-cleanuppad.ll
+++ b/llvm/test/CodeGen/X86/win-cleanuppad.ll
@@ -58,8 +58,8 @@ declare x86_thiscallcc void @"\01??1Dtor@@QAE@XZ"(ptr) #1
define void @nested_cleanup() #0 personality ptr @__CxxFrameHandler3 {
entry:
- %o1 = alloca %struct.Dtor, align 8
- %o2 = alloca %struct.Dtor, align 8
+ %o1 = alloca %struct.Dtor, align 1
+ %o2 = alloca %struct.Dtor, align 1
invoke void @f(i32 1)
to label %invoke.cont unwind label %cleanup.outer
diff --git a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
index 017dc3c0582ec..1451a64d1ef7d 100644
--- a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
+++ b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
@@ -13,8 +13,8 @@ define void @test1(i1 %cmp) align 2 {
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movq %rsp, %rax
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movq %rsp, %rcx
; CHECK-NEXT: cmovneq %rax, %rcx
; CHECK-NEXT: movups (%rcx), %xmm0
; CHECK-NEXT: callq _sink
@@ -36,8 +36,8 @@ define void @test2(i1 %cmp) align 2 {
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movq %rsp, %rax
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movq %rsp, %rcx
; CHECK-NEXT: cmovneq %rax, %rcx
; CHECK-NEXT: movaps (%rcx), %xmm0
; CHECK-NEXT: callq _sink
diff --git a/llvm/test/DebugInfo/AArch64/frameindices.ll b/llvm/test/DebugInfo/AArch64/frameindices.ll
index 8f736a07034cf..b53fbf6fd0883 100644
--- a/llvm/test/DebugInfo/AArch64/frameindices.ll
+++ b/llvm/test/DebugInfo/AArch64/frameindices.ll
@@ -86,7 +86,7 @@ entry:
define void @_Z3f16v() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !68 {
entry:
%agg.tmp.i.i = alloca %struct.A, align 8
- %d = alloca %struct.B, align 8
+ %d = alloca %struct.B, align 1
%agg.tmp.sroa.2 = alloca [15 x i8], align 1
%agg.tmp.sroa.4 = alloca [7 x i8], align 1
tail call void @llvm.dbg.declare(metadata [15 x i8]* %agg.tmp.sroa.2, metadata !56, metadata !74), !dbg !75
diff --git a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
index c5ca6c983468a..b5c13da083f44 100644
--- a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
+++ b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
@@ -221,7 +221,7 @@
; Function Attrs: noinline nounwind uwtable
define void @use_dbg_declare() #0 !dbg !7 {
entry:
- %o = alloca %struct.Foo, align 8
+ %o = alloca %struct.Foo, align 4
call void @llvm.dbg.declare(metadata ptr %o, metadata !10, metadata !15), !dbg !16
call void @escape_foo(ptr %o), !dbg !17
ret void, !dbg !18
diff --git a/llvm/test/DebugInfo/X86/dbg-addr.ll b/llvm/test/DebugInfo/X86/dbg-addr.ll
index d6c8e4bf1ce2e..0c6a54a65f3a4 100644
--- a/llvm/test/DebugInfo/X86/dbg-addr.ll
+++ b/llvm/test/DebugInfo/X86/dbg-addr.ll
@@ -44,7 +44,7 @@ target triple = "x86_64--linux"
; Function Attrs: noinline nounwind uwtable
define void @use_dbg_addr() #0 !dbg !7 {
entry:
- %o = alloca %struct.Foo, align 8
+ %o = alloca %struct.Foo, align 4
call void @llvm.dbg.addr(metadata ptr %o, metadata !10, metadata !15), !dbg !16
call void @escape_foo(ptr %o), !dbg !17
ret void, !dbg !18
@@ -52,7 +52,7 @@ entry:
define void @test_dbg_addr_and_dbg_val_undef() #0 !dbg !117 {
entry:
- %o = alloca %struct.Foo, align 8
+ %o = alloca %struct.Foo, align 4
call void @llvm.dbg.addr(metadata ptr %o, metadata !1110, metadata !1115), !dbg !1116
call void @escape_foo(ptr %o), !dbg !1117
call void @llvm.dbg.value(metadata ptr undef, metadata !1110, metadata !1115), !dbg !1116
diff --git a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
index 40188f505b182..7fd345e322b62 100644
--- a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
+++ b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
@@ -23,7 +23,7 @@ target triple = "x86_64--linux"
; Function Attrs: noinline nounwind uwtable
define void @use_dbg_declare() #0 !dbg !7 {
entry:
- %o = alloca %struct.Foo, align 8
+ %o = alloca %struct.Foo, align 4
call void @llvm.dbg.declare(metadata ptr %o, metadata !10, metadata !15), !dbg !16
call void @escape_foo(ptr %o), !dbg !17
ret void, !dbg !18
diff --git a/llvm/test/DebugInfo/X86/sret.ll b/llvm/test/DebugInfo/X86/sret.ll
index 017fc968d4276..644f0c6bcd25f 100644
--- a/llvm/test/DebugInfo/X86/sret.ll
+++ b/llvm/test/DebugInfo/X86/sret.ll
@@ -102,7 +102,7 @@ entry:
define void @_ZN1B9AInstanceEv(ptr noalias sret(%class.A) %agg.result, ptr %this) #2 align 2 !dbg !53 {
entry:
%this.addr = alloca ptr, align 8
- %nrvo = alloca i1, align 1
+ %nrvo = alloca i1
%cleanup.dest.slot = alloca i32
store ptr %this, ptr %this.addr, align 8
call void @llvm.dbg.declare(metadata ptr %this.addr, metadata !89, metadata !DIExpression()), !dbg !91
@@ -139,7 +139,7 @@ entry:
%retval = alloca i32, align 4
%argc.addr = alloca i32, align 4
%argv.addr = alloca ptr, align 8
- %b = alloca %class.B, align 8
+ %b = alloca %class.B, align 1
%return_val = alloca i32, align 4
%temp.lvalue = alloca %class.A, align 8
%exn.slot = alloca ptr
@@ -226,7 +226,7 @@ define linkonce_odr void @_ZN1AD0Ev(ptr %this) unnamed_addr #2 align 2 personali
entry:
%this.addr = alloca ptr, align 8
%exn.slot = alloca ptr
- %ehselector.slot = alloca i32, align 4
+ %ehselector.slot = alloca i32
store ptr %this, ptr %this.addr, align 8
call void @llvm.dbg.declare(metadata ptr %this.addr, metadata !126, metadata !DIExpression()), !dbg !127
%this1 = load ptr, ptr %this.addr
diff --git a/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll b/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
index bc3633c1afd23..09d39cb9416a9 100644
--- a/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
+++ b/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
@@ -86,17 +86,17 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local noundef i32 @_Z3funii(i32 noundef %a, i32 noundef %b) local_unnamed_addr #0 !dbg !17 {
entry:
- %a.addr = alloca i64, align 8, !DIAssignID !58 ; VAR:a
+ %a.addr = alloca i64, align 4, !DIAssignID !58 ; VAR:a
call void @llvm.dbg.assign(metadata i1 undef, metadata !21, metadata !DIExpression(), metadata !58, metadata ptr %a.addr, metadata !DIExpression()), !dbg !27 ; VAR:a
- %b.addr = alloca i64, align 8, !DIAssignID !64 ; VAR:b
+ %b.addr = alloca i64, align 4, !DIAssignID !64 ; VAR:b
call void @llvm.dbg.assign(metadata i1 undef, metadata !22, metadata !DIExpression(), metadata !64, metadata ptr %b.addr, metadata !DIExpression()), !dbg !27 ; VAR:b
- %c.addr = alloca i64, align 8, !DIAssignID !68 ; VAR:c
+ %c.addr = alloca i64, align 4, !DIAssignID !68 ; VAR:c
call void @llvm.dbg.assign(metadata i1 undef, metadata !67, metadata !DIExpression(), metadata !68, metadata ptr %c.addr, metadata !DIExpression()), !dbg !27 ; VAR:c
- %d.addr = alloca i64, align 8, !DIAssignID !73 ; VAR:d
+ %d.addr = alloca i64, align 4, !DIAssignID !73 ; VAR:d
call void @llvm.dbg.assign(metadata i1 undef, metadata !72, metadata !DIExpression(), metadata !73, metadata ptr %d.addr, metadata !DIExpression()), !dbg !27 ; VAR:d
- %e.addr = alloca i64, align 8, !DIAssignID !76 ; VAR:e
+ %e.addr = alloca i64, align 4, !DIAssignID !76 ; VAR:e
call void @llvm.dbg.assign(metadata i1 undef, metadata !75, metadata !DIExpression(), metadata !76, metadata ptr %e.addr, metadata !DIExpression()), !dbg !27 ; VAR:e
- ;%f.addr = alloca i64, align 8, !DIAssignID !80 ; VAR:f
+ ;%f.addr = alloca i64, align 4, !DIAssignID !80 ; VAR:f
;call void @llvm.dbg.assign(metadata i1 undef, metadata !79, metadata !DIExpression(), metadata !80, metadata ptr %f.addr, metadata !DIExpression()), !dbg !27 ; VAR:f
store i64 1, ptr %a.addr, !DIAssignID !70 ; VAR:a
call void @llvm.dbg.assign(metadata i64 1, metadata !21, metadata !DIExpression(), metadata !70, metadata ptr %a.addr, metadata !DIExpression()), !dbg !27 ; VAR:a