[llvm-branch-commits] [clang] [clang][DirectX] Specify element-aligned vectors in TargetInfo (PR #185954)

Justin Bogner via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Mar 11 15:46:52 PDT 2026


https://github.com/bogner updated https://github.com/llvm/llvm-project/pull/185954

>From 1833695fa6bc0e628cd267b51b52d6c224c9a872 Mon Sep 17 00:00:00 2001
From: Justin Bogner <mail at justinbogner.com>
Date: Thu, 29 Jan 2026 16:05:28 -0700
Subject: [PATCH 1/2] [clang][DirectX] Specify element-aligned vectors in
 TargetInfo

Add a bit to TargetInfo to specify that vectors are element-aligned
rather than naturally aligned. This is needed to match DirectX's Data
Layout in LLVM.

Resolves #123968
---
 clang/include/clang/Basic/TargetInfo.h        |  4 +
 clang/lib/AST/ASTContext.cpp                  |  3 +-
 clang/lib/Basic/TargetInfo.cpp                |  1 +
 clang/lib/Basic/Targets/DirectX.h             |  1 +
 clang/test/CodeGenDirectX/Builtins/dot2add.c  | 12 +--
 clang/test/CodeGenHLSL/ArrayAssignable.hlsl   |  4 +-
 .../BasicFeatures/AggregateSplatCast.hlsl     |  4 +-
 .../BasicFeatures/ArrayElementwiseCast.hlsl   | 14 +--
 .../CodeGenHLSL/BasicFeatures/InitLists.hlsl  | 52 +++++-----
 .../BasicFeatures/MatrixConstructor.hlsl      | 20 ++--
 .../BasicFeatures/MatrixElementTypeCast.hlsl  | 18 ++--
 .../MatrixSingleSubscriptConstSwizzle.hlsl    | 24 ++---
 .../MatrixSingleSubscriptDynamicSwizzle.hlsl  |  6 +-
 .../MatrixSingleSubscriptSetter.hlsl          | 12 +--
 .../BasicFeatures/MatrixSplat.hlsl            | 18 ++--
 .../MatrixToAndFromVectorConstructors.hlsl    | 36 +++----
 .../BasicFeatures/OutputArguments.hlsl        |  8 +-
 .../BasicFeatures/StructElementwiseCast.hlsl  | 18 ++--
 .../BasicFeatures/VectorElementwiseCast.hlsl  | 60 ++++++------
 clang/test/CodeGenHLSL/BoolVector.hlsl        | 28 +++---
 clang/test/CodeGenHLSL/basic_types.hlsl       | 54 +++++------
 .../test/CodeGenHLSL/builtins/AddUint64.hlsl  | 24 ++---
 .../CodeGenHLSL/builtins/ScalarSwizzles.hlsl  | 36 +++----
 .../CodeGenHLSL/builtins/VectorSwizzles.hlsl  | 18 ++--
 clang/test/CodeGenHLSL/builtins/clip.hlsl     | 10 +-
 clang/test/CodeGenHLSL/builtins/mad.hlsl      | 96 +++++++++----------
 clang/test/CodeGenHLSL/builtins/select.hlsl   |  8 +-
 clang/test/CodeGenHLSL/float3.hlsl            |  6 +-
 .../CodeGenHLSL/groupsharedArgs/ArrTest.hlsl  |  4 +-
 .../groupsharedArgs/Overloads.hlsl            |  6 +-
 .../groupsharedArgs/TemplateTest.hlsl         | 24 ++---
 .../groupsharedArgs/VectorTest.hlsl           |  4 +-
 ...matrix-member-one-based-swizzle-store.hlsl | 24 ++---
 ...atrix-member-zero-based-swizzle-store.hlsl | 24 ++---
 .../resources/cbuffer-empty-struct-array.hlsl |  4 +-
 clang/test/CodeGenHLSL/resources/cbuffer.hlsl | 26 ++---
 .../CodeGenHLSL/resources/cbuffer_geps.hlsl   |  2 +-
 .../resources/cbuffer_with_packoffset.hlsl    |  4 +-
 .../default_cbuffer_with_layout.hlsl          |  4 +-
 .../resources/res-array-global-dyn-index.hlsl |  6 +-
 40 files changed, 367 insertions(+), 360 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index ec6cd2be7c3c5..f3a85737f6571 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -138,6 +138,7 @@ struct TransferrableTargetInfo {
   unsigned short NewAlign;
   unsigned MaxVectorAlign;
   unsigned MaxTLSAlign;
+  bool VectorsAreElementAligned;
 
   const llvm::fltSemantics *HalfFormat, *BFloat16Format, *FloatFormat,
       *DoubleFormat, *LongDoubleFormat, *Float128Format, *Ibm128Format;
@@ -869,6 +870,9 @@ class TargetInfo : public TransferrableTargetInfo,
             llvm::isPowerOf2_64(AtomicSizeInBits / getCharWidth()));
   }
 
+  /// True if vectors are element-aligned for this target.
+  bool vectorsAreElementAligned() const { return VectorsAreElementAligned; }
+
   /// Return the maximum vector alignment supported for the given target.
   unsigned getMaxVectorAlign() const { return MaxVectorAlign; }
 
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index d8d2fc23974c2..4c672a03eb855 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -2076,7 +2076,8 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
                 : EltInfo.Width * VT->getNumElements();
     // Enforce at least byte size and alignment.
     Width = std::max<unsigned>(8, Width);
-    Align = std::max<unsigned>(8, Width);
+    Align = std::max<unsigned>(
+        8, Target->vectorsAreElementAligned() ? EltInfo.Width : Width);
 
     // If the alignment is not a power of 2, round up to the next power of 2.
     // This happens for non-power-of-2 length vectors.
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 794621c4b3e1f..38edc65801384 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -130,6 +130,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
   MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
   MaxVectorAlign = 0;
   MaxTLSAlign = 0;
+  VectorsAreElementAligned = false;
   SizeType = UnsignedLong;
   PtrDiffType = SignedLong;
   IntMaxType = SignedLongLong;
diff --git a/clang/lib/Basic/Targets/DirectX.h b/clang/lib/Basic/Targets/DirectX.h
index 7589b4309ebf5..3976238bede4a 100644
--- a/clang/lib/Basic/Targets/DirectX.h
+++ b/clang/lib/Basic/Targets/DirectX.h
@@ -63,6 +63,7 @@ class LLVM_LIBRARY_VISIBILITY DirectXTargetInfo : public TargetInfo {
     HasFastHalfType = true;
     HasFloat16 = true;
     NoAsmVariants = true;
+    VectorsAreElementAligned = true;
     PlatformMinVersion = Triple.getOSVersion();
     PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
     resetDataLayout();
diff --git a/clang/test/CodeGenDirectX/Builtins/dot2add.c b/clang/test/CodeGenDirectX/Builtins/dot2add.c
index 47c639b5986ce..4275a285012b0 100644
--- a/clang/test/CodeGenDirectX/Builtins/dot2add.c
+++ b/clang/test/CodeGenDirectX/Builtins/dot2add.c
@@ -8,14 +8,14 @@ typedef half half2 __attribute__((ext_vector_type(2)));
 // CHECK-LABEL: define float @test_dot2add(
 // CHECK-SAME: <2 x half> noundef [[X:%.*]], <2 x half> noundef [[Y:%.*]], float noundef [[Z:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca <2 x half>, align 4
-// CHECK-NEXT:    [[Y_ADDR:%.*]] = alloca <2 x half>, align 4
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca <2 x half>, align 2
+// CHECK-NEXT:    [[Y_ADDR:%.*]] = alloca <2 x half>, align 2
 // CHECK-NEXT:    [[Z_ADDR:%.*]] = alloca float, align 4
-// CHECK-NEXT:    store <2 x half> [[X]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    store <2 x half> [[Y]], ptr [[Y_ADDR]], align 4
+// CHECK-NEXT:    store <2 x half> [[X]], ptr [[X_ADDR]], align 2
+// CHECK-NEXT:    store <2 x half> [[Y]], ptr [[Y_ADDR]], align 2
 // CHECK-NEXT:    store float [[Z]], ptr [[Z_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x half>, ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x half>, ptr [[Y_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x half>, ptr [[X_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x half>, ptr [[Y_ADDR]], align 2
 // CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[Z_ADDR]], align 4
 // CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x half> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x half> [[TMP0]], i32 1
diff --git a/clang/test/CodeGenHLSL/ArrayAssignable.hlsl b/clang/test/CodeGenHLSL/ArrayAssignable.hlsl
index a2f01f5847410..9a568fe3371d0 100644
--- a/clang/test/CodeGenHLSL/ArrayAssignable.hlsl
+++ b/clang/test/CodeGenHLSL/ArrayAssignable.hlsl
@@ -9,7 +9,7 @@ struct S {
 
 // CHECK: @CBArrays.cb = global target("dx.CBuffer", [[CBLayout]])
 // CHECK: @c1 = external hidden addrspace(2) global <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, align 4
-// CHECK: @c2 = external hidden addrspace(2) global [2 x <4 x i32>], align 16
+// CHECK: @c2 = external hidden addrspace(2) global [2 x <4 x i32>], align 4
 // CHECK: @c3 = external hidden addrspace(2) global <{ [1 x <{ <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12) }>], <{ [1 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>, align 4
 // CHECK: @c4 = external hidden addrspace(2) global <{ [1 x <{ %S, target("dx.Padding", 8) }>], %S }>, align 1
 
@@ -157,7 +157,7 @@ void arr_assign8() {
 // See https://github.com/llvm/wg-hlsl/issues/351
 //
 // CHECK-LABEL: define hidden void {{.*}}arr_assign9
-// CHECK: [[C:%.*]] = alloca [2 x <4 x i32>], align 16
+// CHECK: [[C:%.*]] = alloca [2 x <4 x i32>], align 4
 // CHECK-NEXT: [[V0:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[C]], i32 0
 // CHECK-NEXT: [[L0:%.*]] = load <4 x i32>, ptr addrspace(2) @c2, align 4
 // CHECK-NEXT: store <4 x i32> [[L0]], ptr [[V0]], align 4
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl
index 9524f024e8d46..4e6c7537bcaa4 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl
@@ -34,14 +34,14 @@ export void call8() {
 // vector splat from vector of length 1
 // CHECK-LABEL: define void {{.*}}call1
 // CHECK: [[B:%.*]] = alloca <1 x float>, align 4
-// CHECK-NEXT: [[A:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: [[A:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: store <1 x float> splat (float 1.000000e+00), ptr [[B]], align 4
 // CHECK-NEXT: [[L:%.*]] = load <1 x float>, ptr [[B]], align 4
 // CHECK-NEXT: [[VL:%.*]] = extractelement <1 x float> [[L]], i32 0
 // CHECK-NEXT: [[C:%.*]] = fptosi float [[VL]] to i32
 // CHECK-NEXT: [[SI:%.*]] = insertelement <4 x i32> poison, i32 [[C]], i64 0
 // CHECK-NEXT: [[S:%.*]] = shufflevector <4 x i32> [[SI]], <4 x i32> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: store <4 x i32> [[S]], ptr [[A]], align 16
+// CHECK-NEXT: store <4 x i32> [[S]], ptr [[A]], align 4
 export void call1() {
   float1 B = {1.0};
   int4 A = (int4)B;
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl
index 5f2182e27285e..740b80afdb609 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl
@@ -74,20 +74,20 @@ export void call3() {
 
 // flatten array of vector to array with cast
 // CHECK-LABEL: define void {{.*}}call5
-// CHECK: [[A:%.*]] = alloca [1 x <2 x float>], align 8
+// CHECK: [[A:%.*]] = alloca [1 x <2 x float>], align 4
 // CHECK-NEXT: [[B:%.*]] = alloca [2 x i32], align 4
-// CHECK-NEXT: [[Tmp:%.*]] = alloca [1 x <2 x float>], align 8
-// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[A]], ptr align 8 {{.*}}, i32 8, i1 false)
+// CHECK-NEXT: [[Tmp:%.*]] = alloca [1 x <2 x float>], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false)
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 8, i1 false)
-// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[Tmp]], ptr align 8 [[A]], i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false)
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 1
 // CHECK-NEXT: [[VG:%.*]] = getelementptr inbounds [1 x <2 x float>], ptr [[Tmp]], i32 0, i32 0
-// CHECK-NEXT: [[L:%.*]] = load <2 x float>, ptr [[VG]], align 8
+// CHECK-NEXT: [[L:%.*]] = load <2 x float>, ptr [[VG]], align 4
 // CHECK-NEXT: [[VL:%.*]] = extractelement <2 x float> [[L]], i32 0
 // CHECK-NEXT: [[C:%.*]] = fptosi float [[VL]] to i32
 // CHECK-NEXT: store i32 [[C]], ptr [[G1]], align 4
-// CHECK-NEXT: [[L4:%.*]] = load <2 x float>, ptr [[VG]], align 8
+// CHECK-NEXT: [[L4:%.*]] = load <2 x float>, ptr [[VG]], align 4
 // CHECK-NEXT: [[VL5:%.*]] = extractelement <2 x float> [[L4]], i32 1
 // CHECK-NEXT: [[C6:%.*]] = fptosi float [[VL5]] to i32
 // CHECK-NEXT: store i32 [[C6]], ptr [[G2]], align 4
@@ -183,5 +183,5 @@ struct Derived : BFields {
 // CHECK-NEXT: store i32 [[X]], ptr [[Gep3]], align 4
 // CHECK-NEXT: ret void
 export void call8(Derived D) {
-  int A[4] = (int[4])D;  
+  int A[4] = (int[4])D;
 }
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl
index 82ed7545cfdc5..b785a033b7ca7 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl
@@ -110,15 +110,15 @@ TwoFloats case3(int Val) {
 // CHECK-LABEL: define hidden void @_Z5case4Dv2_i(
 // CHECK-SAME: ptr dead_on_unwind noalias writable sret([[STRUCT_TWOFLOATS:%.*]]) align 1 [[AGG_RESULT:%.*]], <2 x i32> noundef [[TWOVALS:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TWOVALS_ADDR:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT:    store <2 x i32> [[TWOVALS]], ptr [[TWOVALS_ADDR]], align 8
+// CHECK-NEXT:    [[TWOVALS_ADDR:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT:    store <2 x i32> [[TWOVALS]], ptr [[TWOVALS_ADDR]], align 4
 // CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOFLOATS]], ptr [[AGG_RESULT]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <2 x i32> [[TMP0]], i64 0
 // CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[VECEXT]] to float
 // CHECK-NEXT:    store float [[CONV]], ptr [[X]], align 1
 // CHECK-NEXT:    [[Y:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOFLOATS]], ptr [[AGG_RESULT]], i32 0, i32 1
-// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <2 x i32> [[TMP1]], i64 1
 // CHECK-NEXT:    [[CONV2:%.*]] = sitofp i32 [[VECEXT1]] to float
 // CHECK-NEXT:    store float [[CONV2]], ptr [[Y]], align 1
@@ -133,14 +133,14 @@ TwoFloats case4(int2 TwoVals) {
 // CHECK-LABEL: define hidden void @_Z5case5Dv2_i(
 // CHECK-SAME: ptr dead_on_unwind noalias writable sret([[STRUCT_TWOINTS:%.*]]) align 1 [[AGG_RESULT:%.*]], <2 x i32> noundef [[TWOVALS:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TWOVALS_ADDR:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT:    store <2 x i32> [[TWOVALS]], ptr [[TWOVALS_ADDR]], align 8
+// CHECK-NEXT:    [[TWOVALS_ADDR:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT:    store <2 x i32> [[TWOVALS]], ptr [[TWOVALS_ADDR]], align 4
 // CHECK-NEXT:    [[Z:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOINTS]], ptr [[AGG_RESULT]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <2 x i32> [[TMP0]], i64 0
 // CHECK-NEXT:    store i32 [[VECEXT]], ptr [[Z]], align 1
 // CHECK-NEXT:    [[W:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOINTS]], ptr [[AGG_RESULT]], i32 0, i32 1
-// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[TWOVALS_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <2 x i32> [[TMP1]], i64 1
 // CHECK-NEXT:    store i32 [[VECEXT1]], ptr [[W]], align 1
 // CHECK-NEXT:    ret void
@@ -771,41 +771,41 @@ FourFloats case10(TwoFloats TF1, TwoFloats TF2) {
 // CHECK-SAME: ptr dead_on_unwind noalias writable sret([[STRUCT_FOURFLOATS:%.*]]) align 1 [[AGG_RESULT:%.*]], float noundef nofpclass(nan inf) [[F:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
-// CHECK-NEXT:    [[REF_TMP:%.*]] = alloca <4 x float>, align 16
-// CHECK-NEXT:    [[REF_TMP1:%.*]] = alloca <4 x float>, align 16
-// CHECK-NEXT:    [[REF_TMP4:%.*]] = alloca <4 x float>, align 16
-// CHECK-NEXT:    [[REF_TMP7:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[REF_TMP:%.*]] = alloca <4 x float>, align 4
+// CHECK-NEXT:    [[REF_TMP1:%.*]] = alloca <4 x float>, align 4
+// CHECK-NEXT:    [[REF_TMP4:%.*]] = alloca <4 x float>, align 4
+// CHECK-NEXT:    [[REF_TMP7:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    store float [[F]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOFLOATS:%.*]], ptr [[AGG_RESULT]], i32 0, i32 0
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[CAST_SPLAT:%.*]] = insertelement <1 x float> poison, float [[TMP0]], i64 0
 // CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <1 x float> [[CAST_SPLAT]], <1 x float> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[REF_TMP]], align 16
-// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[REF_TMP]], align 16
+// CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[REF_TMP]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[REF_TMP]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <4 x float> [[TMP2]], i64 0
 // CHECK-NEXT:    store float [[VECEXT]], ptr [[X]], align 1
 // CHECK-NEXT:    [[Y:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOFLOATS]], ptr [[AGG_RESULT]], i32 0, i32 1
 // CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[CAST_SPLAT2:%.*]] = insertelement <1 x float> poison, float [[TMP3]], i64 0
 // CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <1 x float> [[CAST_SPLAT2]], <1 x float> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT:    store <4 x float> [[TMP4]], ptr [[REF_TMP1]], align 16
-// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x float>, ptr [[REF_TMP1]], align 16
+// CHECK-NEXT:    store <4 x float> [[TMP4]], ptr [[REF_TMP1]], align 4
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x float>, ptr [[REF_TMP1]], align 4
 // CHECK-NEXT:    [[VECEXT3:%.*]] = extractelement <4 x float> [[TMP5]], i64 1
 // CHECK-NEXT:    store float [[VECEXT3]], ptr [[Y]], align 1
 // CHECK-NEXT:    [[Z:%.*]] = getelementptr inbounds nuw [[STRUCT_FOURFLOATS]], ptr [[AGG_RESULT]], i32 0, i32 1
 // CHECK-NEXT:    [[TMP6:%.*]] = load float, ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[CAST_SPLAT5:%.*]] = insertelement <1 x float> poison, float [[TMP6]], i64 0
 // CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <1 x float> [[CAST_SPLAT5]], <1 x float> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT:    store <4 x float> [[TMP7]], ptr [[REF_TMP4]], align 16
-// CHECK-NEXT:    [[TMP8:%.*]] = load <4 x float>, ptr [[REF_TMP4]], align 16
+// CHECK-NEXT:    store <4 x float> [[TMP7]], ptr [[REF_TMP4]], align 4
+// CHECK-NEXT:    [[TMP8:%.*]] = load <4 x float>, ptr [[REF_TMP4]], align 4
 // CHECK-NEXT:    [[VECEXT6:%.*]] = extractelement <4 x float> [[TMP8]], i64 2
 // CHECK-NEXT:    store float [[VECEXT6]], ptr [[Z]], align 1
 // CHECK-NEXT:    [[W:%.*]] = getelementptr inbounds nuw [[STRUCT_FOURFLOATS]], ptr [[AGG_RESULT]], i32 0, i32 2
 // CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[CAST_SPLAT8:%.*]] = insertelement <1 x float> poison, float [[TMP9]], i64 0
 // CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <1 x float> [[CAST_SPLAT8]], <1 x float> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT:    store <4 x float> [[TMP10]], ptr [[REF_TMP7]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = load <4 x float>, ptr [[REF_TMP7]], align 16
+// CHECK-NEXT:    store <4 x float> [[TMP10]], ptr [[REF_TMP7]], align 4
+// CHECK-NEXT:    [[TMP11:%.*]] = load <4 x float>, ptr [[REF_TMP7]], align 4
 // CHECK-NEXT:    [[VECEXT9:%.*]] = extractelement <4 x float> [[TMP11]], i64 3
 // CHECK-NEXT:    store float [[VECEXT9]], ptr [[W]], align 1
 // CHECK-NEXT:    ret void
@@ -976,12 +976,12 @@ int case17Helper(int x) {
 // CHECK-LABEL: define hidden void @_Z6case17v(
 // CHECK-SAME: ) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[X:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT:    [[X:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z12case17Helperi(i32 noundef 0) #[[ATTR2]]
 // CHECK-NEXT:    [[CALL1:%.*]] = call noundef i32 @_Z12case17Helperi(i32 noundef 1) #[[ATTR2]]
 // CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i32> poison, i32 [[CALL]], i32 0
 // CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i32> [[VECINIT]], i32 [[CALL1]], i32 1
-// CHECK-NEXT:    store <2 x i32> [[VECINIT2]], ptr [[X]], align 8
+// CHECK-NEXT:    store <2 x i32> [[VECINIT2]], ptr [[X]], align 4
 // CHECK-NEXT:    ret void
 //
 void case17() {
@@ -1098,8 +1098,8 @@ void case25(EmptyDerived ED, UnnamedDerived UD) {
 // CHECK-LABEL: define hidden void @_Z6case267TwoInts(
 // CHECK-SAME: ptr noundef byval([[STRUCT_TWOINTS:%.*]]) align 1 [[TI:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[F:%.*]] = alloca <4 x float>, align 16
-// CHECK-NEXT:    [[F2:%.*]] = alloca <3 x float>, align 16
+// CHECK-NEXT:    [[F:%.*]] = alloca <4 x float>, align 4
+// CHECK-NEXT:    [[F2:%.*]] = alloca <3 x float>, align 4
 // CHECK-NEXT:    [[Z:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOINTS]], ptr [[TI]], i32 0, i32 0
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Z]], align 1
 // CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
@@ -1110,7 +1110,7 @@ void case25(EmptyDerived ED, UnnamedDerived UD) {
 // CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <4 x float> [[VECINIT]], float [[CONV1]], i32 1
 // CHECK-NEXT:    [[VECINIT3:%.*]] = insertelement <4 x float> [[VECINIT2]], float 1.000000e+00, i32 2
 // CHECK-NEXT:    [[VECINIT4:%.*]] = insertelement <4 x float> [[VECINIT3]], float 2.000000e+00, i32 3
-// CHECK-NEXT:    store <4 x float> [[VECINIT4]], ptr [[F]], align 16
+// CHECK-NEXT:    store <4 x float> [[VECINIT4]], ptr [[F]], align 4
 // CHECK-NEXT:    [[Z5:%.*]] = getelementptr inbounds nuw [[STRUCT_TWOINTS]], ptr [[TI]], i32 0, i32 0
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[Z5]], align 1
 // CHECK-NEXT:    [[CONV6:%.*]] = sitofp i32 [[TMP2]] to float
@@ -1119,7 +1119,7 @@ void case25(EmptyDerived ED, UnnamedDerived UD) {
 // CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[W8]], align 1
 // CHECK-NEXT:    [[CONV9:%.*]] = sitofp i32 [[TMP3]] to float
 // CHECK-NEXT:    [[VECINIT10:%.*]] = insertelement <3 x float> [[VECINIT7]], float [[CONV9]], i32 2
-// CHECK-NEXT:    store <3 x float> [[VECINIT10]], ptr [[F2]], align 16
+// CHECK-NEXT:    store <3 x float> [[VECINIT10]], ptr [[F2]], align 4
 // CHECK-NEXT:    ret void
 //
 void case26(TwoInts TI) {
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixConstructor.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixConstructor.hlsl
index 9263e46a08822..a5dd31d745f13 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixConstructor.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixConstructor.hlsl
@@ -61,26 +61,26 @@ float3x2 case2() {
 // CHECK-LABEL: define hidden noundef nofpclass(nan inf) <6 x float> @_Z5case3Dv3_fS_(
 // CHECK-SAME: <3 x float> noundef nofpclass(nan inf) [[A:%.*]], <3 x float> noundef nofpclass(nan inf) [[B:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <3 x float>, align 16
-// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <3 x float>, align 16
-// CHECK-NEXT:    store <3 x float> [[A]], ptr [[A_ADDR]], align 16
-// CHECK-NEXT:    store <3 x float> [[B]], ptr [[B_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x float>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <3 x float>, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <3 x float>, align 4
+// CHECK-NEXT:    store <3 x float> [[A]], ptr [[A_ADDR]], align 4
+// CHECK-NEXT:    store <3 x float> [[B]], ptr [[B_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x float>, ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <3 x float> [[TMP0]], i64 0
 // CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <6 x float> poison, float [[VECEXT]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = load <3 x float>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <3 x float>, ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <3 x float> [[TMP1]], i64 1
 // CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <6 x float> [[VECINIT]], float [[VECEXT1]], i32 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load <3 x float>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <3 x float>, ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT3:%.*]] = extractelement <3 x float> [[TMP2]], i64 2
 // CHECK-NEXT:    [[VECINIT4:%.*]] = insertelement <6 x float> [[VECINIT2]], float [[VECEXT3]], i32 2
-// CHECK-NEXT:    [[TMP3:%.*]] = load <3 x float>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = load <3 x float>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT5:%.*]] = extractelement <3 x float> [[TMP3]], i64 0
 // CHECK-NEXT:    [[VECINIT6:%.*]] = insertelement <6 x float> [[VECINIT4]], float [[VECEXT5]], i32 3
-// CHECK-NEXT:    [[TMP4:%.*]] = load <3 x float>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = load <3 x float>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT7:%.*]] = extractelement <3 x float> [[TMP4]], i64 1
 // CHECK-NEXT:    [[VECINIT8:%.*]] = insertelement <6 x float> [[VECINIT6]], float [[VECEXT7]], i32 4
-// CHECK-NEXT:    [[TMP5:%.*]] = load <3 x float>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP5:%.*]] = load <3 x float>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT9:%.*]] = extractelement <3 x float> [[TMP5]], i64 2
 // CHECK-NEXT:    [[VECINIT10:%.*]] = insertelement <6 x float> [[VECINIT8]], float [[VECEXT9]], i32 5
 // CHECK-NEXT:    [[MATRIX_ROWMAJOR2COLMAJOR:%.*]] = shufflevector <6 x float> [[VECINIT10]], <6 x float> poison, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixElementTypeCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixElementTypeCast.hlsl
index f21d1ddb7386e..dd9dd706aae26 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixElementTypeCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixElementTypeCast.hlsl
@@ -202,27 +202,27 @@ void call4(Derived D) {
 // CHECK-LABEL: define hidden noundef nofpclass(nan inf) <4 x float> @_Z5call5Dv4_f(
 // CHECK-SAME: <4 x float> noundef nofpclass(nan inf) [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    [[M:%.*]] = alloca [2 x <2 x float>], align 4
-// CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <4 x float>, align 4
-// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    store <4 x float> [[TMP0]], ptr [[HLSL_EWCAST_SRC]], align 16
+// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    store <4 x float> [[TMP0]], ptr [[HLSL_EWCAST_SRC]], align 4
 // CHECK-NEXT:    [[VECTOR_GEP:%.*]] = getelementptr inbounds <4 x float>, ptr [[HLSL_EWCAST_SRC]], i32 0
 // CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[FLATCAST_TMP]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x float> [[TMP1]], float [[VECEXT]], i64 0
-// CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <4 x float> [[TMP4]], i32 1
 // ROW-CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x float> [[TMP3]], float [[VECEXT1]], i64 1
 // COL-CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x float> [[TMP3]], float [[VECEXT1]], i64 2
-// CHECK-NEXT:    [[TMP6:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 16
+// CHECK-NEXT:    [[TMP6:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 4
 // CHECK-NEXT:    [[VECEXT2:%.*]] = extractelement <4 x float> [[TMP6]], i32 2
 // ROW-CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x float> [[TMP5]], float [[VECEXT2]], i64 2
 // COL-CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x float> [[TMP5]], float [[VECEXT2]], i64 1
-// CHECK-NEXT:    [[TMP8:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = load <4 x float>, ptr [[VECTOR_GEP]], align 4
 // CHECK-NEXT:    [[VECEXT3:%.*]] = extractelement <4 x float> [[TMP8]], i32 3
 // CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x float> [[TMP7]], float [[VECEXT3]], i64 3
 // CHECK-NEXT:    store <4 x float> [[TMP9]], ptr [[M]], align 4
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptConstSwizzle.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptConstSwizzle.hlsl
index 312eeed1c5107..378bd04bba859 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptConstSwizzle.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptConstSwizzle.hlsl
@@ -5,10 +5,10 @@
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(64) [[M:%.*]], <4 x float> noundef nofpclass(nan inf) [[V:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
-// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x float>, ptr [[TMP1]], i32 0, i32 15
@@ -32,10 +32,10 @@ void setMatrix1(out float4x4 M, float4 V) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(64) [[M:%.*]], <4 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
-// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3]], !align [[META4]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x i32>, ptr [[TMP1]], i32 0, i32 2
@@ -59,10 +59,10 @@ void setMatrix2(out int4x4 M, int4 V) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(24) [[M:%.*]], <3 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <3 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <3 x i32>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
-// CHECK-NEXT:    store <3 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <3 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <3 x i32> [[TMP0]], <3 x i32> poison, <3 x i32> <i32 2, i32 1, i32 0>
 // CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3]], !align [[META4]]
 // CHECK-NEXT:    [[TMP3:%.*]] = extractelement <3 x i32> [[TMP1]], i32 0
@@ -84,10 +84,10 @@ void setMatrixVectorSwizzle(out int2x3 M, int3 V) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(24) [[M:%.*]], <3 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <3 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <3 x i32>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
-// CHECK-NEXT:    store <3 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <3 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3]], !align [[META4]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i32> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <6 x i32>, ptr [[TMP1]], i32 0, i32 1
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptDynamicSwizzle.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptDynamicSwizzle.hlsl
index 97921c785dc9d..36e351523d305 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptDynamicSwizzle.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptDynamicSwizzle.hlsl
@@ -6,11 +6,11 @@
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
 // CHECK-NEXT:    [[INDEX_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    store i32 [[INDEX]], ptr [[INDEX_ADDR]], align 4
-// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[INDEX_ADDR]], align 4
 // CHECK-NEXT:    [[TMP3:%.*]] = add i32 12, [[TMP2]]
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptSetter.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptSetter.hlsl
index ec362aa269986..8595bb932dac8 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptSetter.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSingleSubscriptSetter.hlsl
@@ -6,11 +6,11 @@
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
 // CHECK-NEXT:    [[INDEX_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    store i32 [[INDEX]], ptr [[INDEX_ADDR]], align 4
-// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x float> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3:![0-9]+]], !align [[META4:![0-9]+]]
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[INDEX_ADDR]], align 4
 // CHECK-NEXT:    [[TMP3:%.*]] = add i32 0, [[TMP2]]
@@ -64,12 +64,12 @@ void setMatrixScalar(out float2x1 M, int index, float S) {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca ptr, align 4
 // CHECK-NEXT:    [[INDEX_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    store ptr [[M]], ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    store i32 [[INDEX]], ptr [[INDEX_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = zext <4 x i1> [[V]] to <4 x i32>
-// CHECK-NEXT:    store <4 x i32> [[TMP0]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[TMP0]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[LOADEDV:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i1>
 // CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[M_ADDR]], align 4, !nonnull [[META3]], !align [[META4]]
 // CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[INDEX_ADDR]], align 4
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSplat.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSplat.hlsl
index 9ae13e3dc04b0..b65bcdf425a20 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixSplat.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixSplat.hlsl
@@ -83,10 +83,10 @@ void DynamicBoolSplat(bool Value) {
 // CHECK-LABEL: define hidden void @_Z13CastThenSplatDv4_f(
 // CHECK-SAME: <4 x float> noundef nofpclass(nan inf) [[VALUE:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VALUE_ADDR:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[VALUE_ADDR:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    [[M:%.*]] = alloca [3 x <3 x float>], align 4
-// CHECK-NEXT:    store <4 x float> [[VALUE]], ptr [[VALUE_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[VALUE_ADDR]], align 16
+// CHECK-NEXT:    store <4 x float> [[VALUE]], ptr [[VALUE_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[VALUE_ADDR]], align 4
 // CHECK-NEXT:    [[CAST_VTRUNC:%.*]] = extractelement <4 x float> [[TMP0]], i32 0
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <9 x float> poison, float [[CAST_VTRUNC]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <9 x float> [[SPLAT_SPLATINSERT]], <9 x float> poison, <9 x i32> zeroinitializer
@@ -100,10 +100,10 @@ void CastThenSplat(float4 Value) {
 // CHECK-LABEL: define hidden void @_Z30ExplicitIntToBoolCastThenSplatDv3_i(
 // CHECK-SAME: <3 x i32> noundef [[VALUE:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VALUE_ADDR:%.*]] = alloca <3 x i32>, align 16
+// CHECK-NEXT:    [[VALUE_ADDR:%.*]] = alloca <3 x i32>, align 4
 // CHECK-NEXT:    [[M:%.*]] = alloca [2 x <2 x i32>], align 4
-// CHECK-NEXT:    store <3 x i32> [[VALUE]], ptr [[VALUE_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[VALUE_ADDR]], align 16
+// CHECK-NEXT:    store <3 x i32> [[VALUE]], ptr [[VALUE_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[VALUE_ADDR]], align 4
 // CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne <3 x i32> [[TMP0]], zeroinitializer
 // CHECK-NEXT:    [[CAST_VTRUNC:%.*]] = extractelement <3 x i1> [[TOBOOL]], i32 0
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[CAST_VTRUNC]], i64 0
@@ -119,10 +119,10 @@ void ExplicitIntToBoolCastThenSplat(int3 Value) {
 // CHECK-LABEL: define hidden void @_Z32ExplicitFloatToBoolCastThenSplatDv2_f(
 // CHECK-SAME: <2 x float> noundef nofpclass(nan inf) [[VALUE:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VALUE_ADDR:%.*]] = alloca <2 x float>, align 8
+// CHECK-NEXT:    [[VALUE_ADDR:%.*]] = alloca <2 x float>, align 4
 // CHECK-NEXT:    [[M:%.*]] = alloca [3 x <2 x i32>], align 4
-// CHECK-NEXT:    store <2 x float> [[VALUE]], ptr [[VALUE_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[VALUE_ADDR]], align 8
+// CHECK-NEXT:    store <2 x float> [[VALUE]], ptr [[VALUE_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[VALUE_ADDR]], align 4
 // CHECK-NEXT:    [[TOBOOL:%.*]] = fcmp reassoc nnan ninf nsz arcp afn une <2 x float> [[TMP0]], zeroinitializer
 // CHECK-NEXT:    [[CAST_VTRUNC:%.*]] = extractelement <2 x i1> [[TOBOOL]], i32 0
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <6 x i1> poison, i1 [[CAST_VTRUNC]], i64 0
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/MatrixToAndFromVectorConstructors.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/MatrixToAndFromVectorConstructors.hlsl
index a515b91da01c2..4c31b23d1b3fa 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/MatrixToAndFromVectorConstructors.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/MatrixToAndFromVectorConstructors.hlsl
@@ -5,7 +5,7 @@
 // CHECK-SAME: <4 x float> noundef nofpclass(nan inf) [[M:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[M_ADDR:%.*]] = alloca [2 x <2 x float>], align 4
-// CHECK-NEXT:    [[V:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[V:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    store <4 x float> [[M]], ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    [[MATRIXEXT:%.*]] = extractelement <4 x float> [[TMP0]], i32 0
@@ -21,8 +21,8 @@
 // CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    [[MATRIXEXT5:%.*]] = extractelement <4 x float> [[TMP3]], i32 3
 // CHECK-NEXT:    [[VECINIT6:%.*]] = insertelement <4 x float> [[VECINIT4]], float [[MATRIXEXT5]], i32 3
-// CHECK-NEXT:    store <4 x float> [[VECINIT6]], ptr [[V]], align 16
-// CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[V]], align 16
+// CHECK-NEXT:    store <4 x float> [[VECINIT6]], ptr [[V]], align 4
+// CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[V]], align 4
 // CHECK-NEXT:    ret <4 x float> [[TMP4]]
 //
 float4 fn(float2x2 m) {
@@ -33,19 +33,19 @@ float4 fn(float2x2 m) {
 // CHECK-LABEL: define hidden noundef <4 x i32> @_Z2fnDv4_i(
 // CHECK-SAME: <4 x i32> noundef [[V:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    [[M:%.*]] = alloca [2 x <2 x i32>], align 4
-// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <4 x i32> [[TMP0]], i64 0
 // CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x i32> poison, i32 [[VECEXT]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <4 x i32> [[TMP1]], i64 1
 // CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <4 x i32> [[VECINIT]], i32 [[VECEXT1]], i32 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT3:%.*]] = extractelement <4 x i32> [[TMP2]], i64 2
 // CHECK-NEXT:    [[VECINIT4:%.*]] = insertelement <4 x i32> [[VECINIT2]], i32 [[VECEXT3]], i32 2
-// CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT5:%.*]] = extractelement <4 x i32> [[TMP3]], i64 3
 // CHECK-NEXT:    [[VECINIT6:%.*]] = insertelement <4 x i32> [[VECINIT4]], i32 [[VECEXT5]], i32 3
 // COL-CHECK-NEXT:    [[MATRIX_ROWMAJOR2COLMAJOR:%.*]] = shufflevector <4 x i32> [[VECINIT6]], <4 x i32> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -62,12 +62,12 @@ int2x2 fn(int4 v) {
 // CHECK-LABEL: define hidden noundef <2 x i32> @_Z3fn1Dv2_i(
 // CHECK-SAME: <2 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT:    store <2 x i32> [[V]], ptr [[V_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[V_ADDR]], align 8
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT:    store <2 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <2 x i32> [[TMP0]], i64 0
 // CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i32> poison, i32 [[VECEXT]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[V_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <2 x i32> [[TMP1]], i64 1
 // CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i32> [[VECINIT]], i32 [[VECEXT1]], i32 1
 // COL-CHECK-NEXT:    [[MATRIX_ROWMAJOR2COLMAJOR:%.*]] = shufflevector <2 x i32> [[VECINIT2]], <2 x i32> poison, <2 x i32> <i32 0, i32 1>
@@ -81,18 +81,18 @@ int1x2 fn1(int2 v) {
 // CHECK-LABEL: define hidden noundef <3 x i1> @_Z3fn2Dv3_b(
 // CHECK-SAME: <3 x i1> noundef [[B:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <3 x i32>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <3 x i32>, align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = zext <3 x i1> [[B]] to <3 x i32>
-// CHECK-NEXT:    store <3 x i32> [[TMP0]], ptr [[B_ADDR]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i32>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    store <3 x i32> [[TMP0]], ptr [[B_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i32>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[LOADEDV:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i1>
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <3 x i1> [[LOADEDV]], i64 0
 // CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <3 x i1> poison, i1 [[VECEXT]], i32 0
-// CHECK-NEXT:    [[TMP2:%.*]] = load <3 x i32>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <3 x i32>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[LOADEDV1:%.*]] = trunc <3 x i32> [[TMP2]] to <3 x i1>
 // CHECK-NEXT:    [[VECEXT2:%.*]] = extractelement <3 x i1> [[LOADEDV1]], i64 1
 // CHECK-NEXT:    [[VECINIT3:%.*]] = insertelement <3 x i1> [[VECINIT]], i1 [[VECEXT2]], i32 1
-// CHECK-NEXT:    [[TMP3:%.*]] = load <3 x i32>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = load <3 x i32>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[LOADEDV4:%.*]] = trunc <3 x i32> [[TMP3]] to <3 x i1>
 // CHECK-NEXT:    [[VECEXT5:%.*]] = extractelement <3 x i1> [[LOADEDV4]], i64 2
 // CHECK-NEXT:    [[VECINIT6:%.*]] = insertelement <3 x i1> [[VECINIT3]], i1 [[VECEXT5]], i32 2
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl
index ec03804ad1a4c..2e01ddddc510c 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/OutputArguments.hlsl
@@ -76,7 +76,7 @@ export int case3() {
 // Vector swizzles in HLSL produce lvalues, so they can be used as arguments to
 // inout parameters and the swizzle is reversed on writeback.
 
-// CHECK: define hidden void {{.*}}funky{{.*}}(ptr noalias noundef nonnull align 16 dereferenceable(16) {{%.*}})
+// CHECK: define hidden void {{.*}}funky{{.*}}(ptr noalias noundef nonnull align 4 dereferenceable(12) {{%.*}})
 void funky(inout int3 X) {
   X.x += 1;
   X.y += 2;
@@ -99,7 +99,7 @@ void funky(inout int3 X) {
 // CHECK:  store <3 x i32> [[Vyzx]], ptr [[ArgTmp]]
 
 // Call the function with the temporary.
-// CHECK: call void {{.*}}funky{{.*}}(ptr noalias noundef nonnull align 16 dereferenceable(16) [[ArgTmp]])
+// CHECK: call void {{.*}}funky{{.*}}(ptr noalias noundef nonnull align 4 dereferenceable(12) [[ArgTmp]])
 
 // Write it back.
 // CHECK:  [[RetVal:%.*]] = load <3 x i32>, ptr [[ArgTmp]]
@@ -200,7 +200,7 @@ export int case7() {
 
 // Case 8: Non-scalars with a cast expression.
 
-// CHECK: define hidden void {{.*}}trunc_vec{{.*}}(ptr noalias noundef nonnull align 16 dereferenceable(16) {{%.*}})
+// CHECK: define hidden void {{.*}}trunc_vec{{.*}}(ptr noalias noundef nonnull align 4 dereferenceable(12) {{%.*}})
 void trunc_vec(inout int3 V) {}
 
 // ALL-LABEL: define noundef nofpclass(nan inf) <3 x float> {{.*}}case8
@@ -210,7 +210,7 @@ void trunc_vec(inout int3 V) {}
 // CHECK: [[FVal:%.*]] = load <3 x float>, ptr [[V]]
 // CHECK: [[IVal:%.*]] = fptosi <3 x float> [[FVal]] to <3 x i32>
 // CHECK: store <3 x i32> [[IVal]], ptr [[Tmp]]
-// CHECK: call void {{.*}}trunc_vec{{.*}}(ptr noalias noundef nonnull align 16 dereferenceable(16) [[Tmp]])
+// CHECK: call void {{.*}}trunc_vec{{.*}}(ptr noalias noundef nonnull align 4 dereferenceable(12) [[Tmp]])
 // CHECK: [[IRet:%.*]] = load <3 x i32>, ptr [[Tmp]]
 // CHECK: [[FRet:%.*]] = sitofp <3 x i32> [[IRet]] to <3 x float>
 // CHECK: store <3 x float> [[FRet]], ptr [[V]]
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl
index bd9a62f4db359..ab5873bfa8296 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl
@@ -23,10 +23,10 @@ export void call0() {
 
 // struct from vector
 // CHECK-LABEL: define void {{.*}}call1
-// CHECK: [[A:%.*]] = alloca <2 x i32>, align 8
+// CHECK: [[A:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 1
-// CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A]], align 8
-// CHECK-NEXT: [[L:%.*]] = load <2 x i32>, ptr [[A]], align 8
+// CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A]], align 4
+// CHECK-NEXT: [[L:%.*]] = load <2 x i32>, ptr [[A]], align 4
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1
 // CHECK-NEXT: [[VL:%.*]] = extractelement <2 x i32> [[L]], i64 0
@@ -169,10 +169,10 @@ export void call9(Derived D) {
 
 // Derived struct from vector
 // CHECK-LABEL: call10
-// CHECK: [[IAddr:%.*]] = alloca <4 x i32>, align 16
+// CHECK: [[IAddr:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: [[D:%.*]] = alloca %struct.Derived, align 1
-// CHECK-NEXT: store <4 x i32> %I, ptr [[IAddr]], align 16
-// CHECK-NEXT: [[A:%.*]] = load <4 x i32>, ptr [[IAddr]], align 16
+// CHECK-NEXT: store <4 x i32> %I, ptr [[IAddr]], align 4
+// CHECK-NEXT: [[A:%.*]] = load <4 x i32>, ptr [[IAddr]], align 4
 // CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0
 // CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
 // CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0, i32 0
@@ -236,10 +236,10 @@ struct Empty {
 
 // cast to an empty struct
 // CHECK-LABEL: call12
-// CHECK: [[I:%.*]] = alloca <4 x i32>, align 16
+// CHECK: [[I:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: [[E:%.*]] = alloca %struct.Empty, align 1
-// CHECK-NEXT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[I]], align 16
-// CHECK-NEXT: [[A:%.*]] = load <4 x i32>, ptr [[I]], align 16
+// CHECK-NEXT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[I]], align 4
+// CHECK-NEXT: [[A:%.*]] = load <4 x i32>, ptr [[I]], align 4
 // CHECK-NEXt: ret void
 export void call12() {
   int4 I = {1,2,3,4};
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl
index e232223b185c2..15dc2e0ed166b 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl
@@ -4,19 +4,19 @@
 // vector flat cast from array
 // CHECK-LABEL: define void {{.*}}call2
 // CHECK: [[A:%.*]] = alloca [2 x [1 x i32]], align 4
-// CHECK-NEXT: [[B:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[B:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x [1 x i32]], align 4
-// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false)
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false)
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 1, i32 0
-// CHECK-NEXT: [[C:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 8
+// CHECK-NEXT: [[C:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 4
 // CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4
 // CHECK-NEXT: [[D:%.*]] = insertelement <2 x i32> [[C]], i32 [[L]], i64 0
 // CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[G2]], align 4
 // CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[D]], i32 [[L2]], i64 1
-// CHECK-NEXT: store <2 x i32> [[E]], ptr [[B]], align 8
+// CHECK-NEXT: store <2 x i32> [[E]], ptr [[B]], align 4
 export void call2() {
   int A[2][1] = {{1},{2}};
   int2 B = (int2)A;
@@ -30,20 +30,20 @@ struct S {
 // vector flat cast from struct
 // CHECK-LABEL: define void {{.*}}call3
 // CHECK: [[s:%.*]] = alloca %struct.S, align 1
-// CHECK-NEXT: [[A:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[A:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 1
-// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[s]], ptr align 1 {{.*}}, i32 8, i1 false)
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[Tmp]], ptr align 1 [[s]], i32 8, i1 false)
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1
-// CHECK-NEXT: [[B:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 8
+// CHECK-NEXT: [[B:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 4
 // CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4
 // CHECK-NEXT: [[C:%.*]] = insertelement <2 x i32> [[B]], i32 [[L]], i64 0
 // CHECK-NEXT: [[L2:%.*]] = load float, ptr [[G2]], align 4
 // CHECK-NEXT: [[D:%.*]] = fptosi float [[L2]] to i32
 // CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[C]], i32 [[D]], i64 1
-// CHECK-NEXT: store <2 x i32> [[E]], ptr [[A]], align 8
+// CHECK-NEXT: store <2 x i32> [[E]], ptr [[A]], align 4
 export void call3() {
   S s = {1, 2.0};
   int2 A = (int2)s;
@@ -94,16 +94,16 @@ struct Derived : BFields {
 
 // vector flat cast from derived struct with bitfield
 // CHECK-LABEL: call6
-// CHECK: [[A:%.*]] = alloca <4 x i32>, align 16
+// CHECK: [[A:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.Derived, align 1
-// CHECK-NEXT: [[FlatTmp:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: [[FlatTmp:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[Tmp]], ptr align 1 %D, i32 19, i1 false)
 // CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0
 // CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
 // CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 0
 // CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 2
 // CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 1
-// CHECK-NEXT: [[Z:%.*]] = load <4 x i32>, ptr [[FlatTmp]], align 16
+// CHECK-NEXT: [[Z:%.*]] = load <4 x i32>, ptr [[FlatTmp]], align 4
 // CHECK-NEXT: [[Y:%.*]] = load double, ptr [[Gep1]], align 8
 // CHECK-NEXT: [[C:%.*]] = fptosi double [[Y]] to i32
 // CHECK-NEXT: [[X:%.*]] = insertelement <4 x i32> [[Z]], i32 [[C]], i64 0
@@ -117,7 +117,7 @@ struct Derived : BFields {
 // CHECK-NEXT: [[U:%.*]] = insertelement <4 x i32> [[W]], i32 [[C4]], i64 2
 // CHECK-NEXT: [[T:%.*]] = load i32, ptr [[Gep3]], align 4
 // CHECK-NEXT: [[S:%.*]] = insertelement <4 x i32> [[U]], i32 [[T]], i64 3
-// CHECK-NEXT: store <4 x i32> [[S]], ptr [[A]], align 16
+// CHECK-NEXT: store <4 x i32> [[S]], ptr [[A]], align 4
 // CHECK-NEXT: ret void
 export void call6(Derived D) {
   int4 A = (int4)D;
@@ -126,14 +126,14 @@ export void call6(Derived D) {
 // vector flat cast from matrix of same size (float)
 // CHECK-LABEL: call7
 // CHECK:    [[M_ADDR:%.*]] = alloca [2 x <2 x float>], align 4
-// CHECK-NEXT:    [[V:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[V:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca [2 x <2 x float>], align 4
-// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <4 x float>, align 4
 // CHECK-NEXT:    store <4 x float> %M, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    store <4 x float> [[TMP0]], ptr [[HLSL_EWCAST_SRC]], align 4
 // CHECK-NEXT:    [[MATRIX_GEP:%.*]] = getelementptr inbounds <4 x float>, ptr [[HLSL_EWCAST_SRC]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[FLATCAST_TMP]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[FLATCAST_TMP]], align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[MATRIX_GEP]], align 4
 // CHECK-NEXT:    [[MATRIXEXT:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x float> [[TMP1]], float [[MATRIXEXT]], i64 0
@@ -148,7 +148,7 @@ export void call6(Derived D) {
 // CHECK-NEXT:    [[TMP8:%.*]] = load <4 x float>, ptr [[MATRIX_GEP]], align 4
 // CHECK-NEXT:    [[MATRIXEXT3:%.*]] = extractelement <4 x float> [[TMP8]], i32 3
 // CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x float> [[TMP7]], float [[MATRIXEXT3]], i64 3
-// CHECK-NEXT:    store <4 x float> [[TMP9]], ptr [[V]], align 16
+// CHECK-NEXT:    store <4 x float> [[TMP9]], ptr [[V]], align 4
 // CHECK-NEXT:    ret void
 export void call7(float2x2 M) {
     float4 V = (float4)M;
@@ -158,15 +158,15 @@ export void call7(float2x2 M) {
 // CHECK-LABEL: call8
 // COL-CHECK:    [[M_ADDR:%.*]] = alloca [1 x <3 x i32>], align 4
 // ROW-CHECK:    [[M_ADDR:%.*]] = alloca [3 x <1 x i32>], align 4
-// CHECK-NEXT:    [[V:%.*]] = alloca <3 x i32>, align 16
+// CHECK-NEXT:    [[V:%.*]] = alloca <3 x i32>, align 4
 // COL-CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca [1 x <3 x i32>], align 4
 // ROW-CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca [3 x <1 x i32>], align 4
-// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <3 x i32>, align 16
+// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <3 x i32>, align 4
 // CHECK-NEXT:    store <3 x i32> %M, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    store <3 x i32> [[TMP0]], ptr [[HLSL_EWCAST_SRC]], align 4
 // CHECK-NEXT:    [[MATRIX_GEP:%.*]] = getelementptr inbounds <3 x i32>, ptr [[HLSL_EWCAST_SRC]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i32>, ptr [[FLATCAST_TMP]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <3 x i32>, ptr [[FLATCAST_TMP]], align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load <3 x i32>, ptr [[MATRIX_GEP]], align 4
 // CHECK-NEXT:    [[MATRIXEXT:%.*]] = extractelement <3 x i32> [[TMP2]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = insertelement <3 x i32> [[TMP1]], i32 [[MATRIXEXT]], i64 0
@@ -176,7 +176,7 @@ export void call7(float2x2 M) {
 // CHECK-NEXT:    [[TMP6:%.*]] = load <3 x i32>, ptr [[MATRIX_GEP]], align 4
 // CHECK-NEXT:    [[MATRIXEXT2:%.*]] = extractelement <3 x i32> [[TMP6]], i32 2
 // CHECK-NEXT:    [[TMP7:%.*]] = insertelement <3 x i32> [[TMP5]], i32 [[MATRIXEXT2]], i64 2
-// CHECK-NEXT:    store <3 x i32> [[TMP7]], ptr [[V]], align 16
+// CHECK-NEXT:    store <3 x i32> [[TMP7]], ptr [[V]], align 4
 // CHECK-NEXT:    ret void
 export void call8(int3x1 M) {
     int3 V = (int3)M;
@@ -186,16 +186,16 @@ export void call8(int3x1 M) {
 // CHECK-LABEL: call9
 // COL-CHECK:    [[M_ADDR:%.*]] = alloca [2 x <1 x i32>], align 4
 // ROW-CHECK:    [[M_ADDR:%.*]] = alloca [1 x <2 x i32>], align 4
-// CHECK-NEXT:    [[V:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT:    [[V:%.*]] = alloca <2 x i32>, align 4
 // COL-CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca [2 x <1 x i32>], align 4
 // ROW-CHECK-NEXT:    [[HLSL_EWCAST_SRC:%.*]] = alloca [1 x <2 x i32>], align 4
-// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <2 x i1>, align 8
+// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <2 x i1>, align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = zext <2 x i1> %M to <2 x i32>
 // CHECK-NEXT:    store <2 x i32> [[TMP0]], ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[M_ADDR]], align 4
 // CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[HLSL_EWCAST_SRC]], align 4
 // CHECK-NEXT:    [[MATRIX_GEP:%.*]] = getelementptr inbounds <2 x i32>, ptr [[HLSL_EWCAST_SRC]], i32 0
-// CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i1>, ptr [[FLATCAST_TMP]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i1>, ptr [[FLATCAST_TMP]], align 4
 // CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[MATRIX_GEP]], align 4
 // CHECK-NEXT:    [[MATRIXEXT:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
 // CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i32 [[MATRIXEXT]] to i1
@@ -205,7 +205,7 @@ export void call8(int3x1 M) {
 // CHECK-NEXT:    [[LOADEDV2:%.*]] = trunc i32 [[MATRIXEXT1]] to i1
 // CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i1> [[TMP4]], i1 [[LOADEDV2]], i64 1
 // CHECK-NEXT:    [[TMP7:%.*]] = zext <2 x i1> [[TMP6]] to <2 x i32>
-// CHECK-NEXT:    store <2 x i32> [[TMP7]], ptr [[V]], align 8
+// CHECK-NEXT:    store <2 x i32> [[TMP7]], ptr [[V]], align 4
 // CHECK-NEXT:    ret void
 export void call9(bool1x2 M) {
     bool2 V = (bool2)M;
@@ -217,22 +217,22 @@ struct BoolVecStruct {
 
 // vector flat cast from struct containing bool vector
 // CHECK-LABEL: call10
-// CHECK:    [[V:%.*]] = alloca <2 x i32>, align 8
+// CHECK:    [[V:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT:    [[AGG_TEMP:%.*]] = alloca %struct.BoolVecStruct, align 1
-// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <2 x i1>, align 8
+// CHECK-NEXT:    [[FLATCAST_TMP:%.*]] = alloca <2 x i1>, align 4
 // CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[AGG_TEMP]], ptr align 1 %s, i32 8, i1 false)
 // CHECK-NEXT:    [[VECTOR_GEP:%.*]] = getelementptr inbounds %struct.BoolVecStruct, ptr [[AGG_TEMP]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i1>, ptr [[FLATCAST_TMP]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[VECTOR_GEP]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i1>, ptr [[FLATCAST_TMP]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[VECTOR_GEP]], align 4
 // CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
 // CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i32 [[VECEXT]] to i1
 // CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i1> [[TMP0]], i1 [[LOADEDV]], i64 0
-// CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[VECTOR_GEP]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[VECTOR_GEP]], align 4
 // CHECK-NEXT:    [[VECEXT1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
 // CHECK-NEXT:    [[LOADEDV2:%.*]] = trunc i32 [[VECEXT1]] to i1
 // CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i1> [[TMP2]], i1 [[LOADEDV2]], i64 1
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <2 x i1> [[TMP4]] to <2 x i32>
-// CHECK-NEXT:    store <2 x i32> [[TMP5]], ptr [[V]], align 8
+// CHECK-NEXT:    store <2 x i32> [[TMP5]], ptr [[V]], align 4
 // CHECK-NEXT:    ret void
 export void call10(BoolVecStruct s) {
     bool2 V = (bool2)s;
diff --git a/clang/test/CodeGenHLSL/BoolVector.hlsl b/clang/test/CodeGenHLSL/BoolVector.hlsl
index 246790dab176f..b25b6d0c5529d 100644
--- a/clang/test/CodeGenHLSL/BoolVector.hlsl
+++ b/clang/test/CodeGenHLSL/BoolVector.hlsl
@@ -2,7 +2,7 @@
 
 // CHECK: %struct.S = type { <2 x i32>, float }
 // CHECK: [[ConstS:@.*]] = private unnamed_addr constant %struct.S { <2 x i32> splat (i32 1), float 1.000000e+00 }, align 1
-// CHECK: [[ConstArr:.*]] = private unnamed_addr constant [2 x <2 x i32>] [<2 x i32> splat (i32 1), <2 x i32> zeroinitializer], align 8
+// CHECK: [[ConstArr:.*]] = private unnamed_addr constant [2 x <2 x i32>] [<2 x i32> splat (i32 1), <2 x i32> zeroinitializer], align 4
 
 struct S {
     bool2 bv;
@@ -10,9 +10,9 @@ struct S {
 };
 
 // CHECK-LABEL: define hidden noundef i1 {{.*}}fn1{{.*}}
-// CHECK: [[B:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT: store <2 x i32> splat (i32 1), ptr [[B]], align 8
-// CHECK-NEXT: [[BoolVec:%.*]] = load <2 x i32>, ptr [[B]], align 8
+// CHECK: [[B:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT: store <2 x i32> splat (i32 1), ptr [[B]], align 4
+// CHECK-NEXT: [[BoolVec:%.*]] = load <2 x i32>, ptr [[B]], align 4
 // CHECK-NEXT: [[L:%.*]] = trunc <2 x i32> [[BoolVec:%.*]] to <2 x i1>
 // CHECK-NEXT: [[VecExt:%.*]] = extractelement <2 x i1> [[L]], i32 0
 // CHECK-NEXT: ret i1 [[VecExt]]
@@ -23,7 +23,7 @@ bool fn1() {
 
 // CHECK-LABEL: define hidden noundef <2 x i1> {{.*}}fn2{{.*}}
 // CHECK: [[VAddr:%.*]] = alloca i32, align 4
-// CHECK-NEXT: [[A:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[A:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[StoreV:%.*]] = zext i1 {{.*}} to i32
 // CHECK-NEXT: store i32 [[StoreV]], ptr [[VAddr]], align 4
 // CHECK-NEXT: [[L:%.*]] = load i32, ptr [[VAddr]], align 4
@@ -31,8 +31,8 @@ bool fn1() {
 // CHECK-NEXT: [[Vec:%.*]] = insertelement <2 x i1> poison, i1 [[LoadV]], i32 0
 // CHECK-NEXT: [[Vec1:%.*]] = insertelement <2 x i1> [[Vec]], i1 true, i32 1
 // CHECK-NEXT: [[Z:%.*]] = zext <2 x i1> [[Vec1]] to <2 x i32>
-// CHECK-NEXT: store <2 x i32> [[Z]], ptr [[A]], align 8
-// CHECK-NEXT: [[LoadBV:%.*]] = load <2 x i32>, ptr [[A]], align 8
+// CHECK-NEXT: store <2 x i32> [[Z]], ptr [[A]], align 4
+// CHECK-NEXT: [[LoadBV:%.*]] = load <2 x i32>, ptr [[A]], align 4
 // CHECK-NEXT: [[LoadV2:%.*]] = trunc <2 x i32> [[LoadBV]] to <2 x i1>
 // CHECK-NEXT: ret <2 x i1> [[LoadV2]]
 bool2 fn2(bool V) {
@@ -54,10 +54,10 @@ bool fn3() {
 }
 
 // CHECK-LABEL: define hidden noundef i1 {{.*}}fn4{{.*}}
-// CHECK: [[Arr:%.*]] = alloca [2 x <2 x i32>], align 8
-// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[Arr]], ptr align 8 [[ConstArr]], i32 16, i1 false)
+// CHECK: [[Arr:%.*]] = alloca [2 x <2 x i32>], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Arr]], ptr align 4 [[ConstArr]], i32 16, i1 false)
 // CHECK-NEXT: [[Idx:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[Arr]], i32 0, i32 0
-// CHECK-NEXT: [[L:%.*]] = load <2 x i32>, ptr [[Idx]], align 8
+// CHECK-NEXT: [[L:%.*]] = load <2 x i32>, ptr [[Idx]], align 4
 // CHECK-NEXT: [[LV:%.*]] = trunc <2 x i32> [[L]] to <2 x i1>
 // CHECK-NEXT: [[VX:%.*]] = extractelement <2 x i1> [[LV]], i32 1
 // CHECK-NEXT: ret i1 [[VX]]
@@ -67,8 +67,8 @@ bool fn4() {
 }
 
 // CHECK-LABEL: define hidden void {{.*}}fn5{{.*}}
-// CHECK: [[Arr:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT: store <2 x i32> splat (i32 1), ptr [[Arr]], align 8
+// CHECK: [[Arr:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT: store <2 x i32> splat (i32 1), ptr [[Arr]], align 4
 // CHECK-NEXT: [[Ptr:%.*]] = getelementptr <2 x i32>, ptr [[Arr]]
 // CHECK-NEXT: store i32 0, ptr [[Ptr]], align 4
 // CHECK-NEXT: ret void
@@ -96,8 +96,8 @@ void fn6() {
 }
 
 // CHECK-LABEL: define hidden void {{.*}}fn7{{.*}}
-// CHECK: [[Arr:%.*]] = alloca [2 x <2 x i32>], align 8
-// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[Arr]], ptr align 8 {{.*}}, i32 16, i1 false)
+// CHECK: [[Arr:%.*]] = alloca [2 x <2 x i32>], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Arr]], ptr align 4 {{.*}}, i32 16, i1 false)
 // CHECK-NEXT: [[Idx:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[Arr]], i32 0, i32 0
 // CHECK-NEXT: %[[Ptr:.*]] = getelementptr <2 x i32>, ptr [[Idx]], i32 0, i32 1
 // CHECK-NEXT: store i32 0, ptr %[[Ptr]], align 4
diff --git a/clang/test/CodeGenHLSL/basic_types.hlsl b/clang/test/CodeGenHLSL/basic_types.hlsl
index 8836126934957..5b6d613ff741b 100644
--- a/clang/test/CodeGenHLSL/basic_types.hlsl
+++ b/clang/test/CodeGenHLSL/basic_types.hlsl
@@ -11,33 +11,33 @@
 // CHECK: @uint_Val = external hidden addrspace(2) global i32, align 4
 // CHECK: @uint64_t_Val = external hidden addrspace(2) global i64, align 8
 // CHECK: @int64_t_Val = external hidden addrspace(2) global i64, align 8
-// CHECK: @int16_t2_Val = external hidden addrspace(2) global <2 x i16>, align 4
-// CHECK: @int16_t3_Val = external hidden addrspace(2) global <3 x i16>, align 8
-// CHECK: @int16_t4_Val = external hidden addrspace(2) global <4 x i16>, align 8
-// CHECK: @uint16_t2_Val = external hidden addrspace(2) global <2 x i16>, align 4
-// CHECK: @uint16_t3_Val = external hidden addrspace(2) global <3 x i16>, align 8
-// CHECK: @uint16_t4_Val = external hidden addrspace(2) global <4 x i16>, align 8
-// CHECK: @int2_Val = external hidden addrspace(2) global <2 x i32>, align 8
-// CHECK: @int3_Val = external hidden addrspace(2) global <3 x i32>, align 16
-// CHECK: @int4_Val = external hidden addrspace(2) global <4 x i32>, align 16
-// CHECK: @uint2_Val = external hidden addrspace(2) global <2 x i32>, align 8
-// CHECK: @uint3_Val = external hidden addrspace(2) global <3 x i32>, align 16
-// CHECK: @uint4_Val = external hidden addrspace(2) global <4 x i32>, align 16
-// CHECK: @int64_t2_Val = external hidden addrspace(2) global <2 x i64>, align 16
-// CHECK: @int64_t3_Val = external hidden addrspace(2) global <3 x i64>, align 32
-// CHECK: @int64_t4_Val = external hidden addrspace(2) global <4 x i64>, align 32
-// CHECK: @uint64_t2_Val = external hidden addrspace(2) global <2 x i64>, align 16
-// CHECK: @uint64_t3_Val = external hidden addrspace(2) global <3 x i64>, align 32
-// CHECK: @uint64_t4_Val = external hidden addrspace(2) global <4 x i64>, align 32
-// CHECK: @half2_Val = external hidden addrspace(2) global <2 x half>, align 4
-// CHECK: @half3_Val = external hidden addrspace(2) global <3 x half>, align 8
-// CHECK: @half4_Val = external hidden addrspace(2) global <4 x half>, align 8
-// CHECK: @float2_Val = external hidden addrspace(2) global <2 x float>, align 8
-// CHECK: @float3_Val = external hidden addrspace(2) global <3 x float>, align 16
-// CHECK: @float4_Val = external hidden addrspace(2) global <4 x float>, align 16
-// CHECK: @double2_Val = external hidden addrspace(2) global <2 x double>, align 16
-// CHECK: @double3_Val = external hidden addrspace(2) global <3 x double>, align 32
-// CHECK: @double4_Val = external hidden addrspace(2) global <4 x double>, align 32
+// CHECK: @int16_t2_Val = external hidden addrspace(2) global <2 x i16>, align 2
+// CHECK: @int16_t3_Val = external hidden addrspace(2) global <3 x i16>, align 2
+// CHECK: @int16_t4_Val = external hidden addrspace(2) global <4 x i16>, align 2
+// CHECK: @uint16_t2_Val = external hidden addrspace(2) global <2 x i16>, align 2
+// CHECK: @uint16_t3_Val = external hidden addrspace(2) global <3 x i16>, align 2
+// CHECK: @uint16_t4_Val = external hidden addrspace(2) global <4 x i16>, align 2
+// CHECK: @int2_Val = external hidden addrspace(2) global <2 x i32>, align 4
+// CHECK: @int3_Val = external hidden addrspace(2) global <3 x i32>, align 4
+// CHECK: @int4_Val = external hidden addrspace(2) global <4 x i32>, align 4
+// CHECK: @uint2_Val = external hidden addrspace(2) global <2 x i32>, align 4
+// CHECK: @uint3_Val = external hidden addrspace(2) global <3 x i32>, align 4
+// CHECK: @uint4_Val = external hidden addrspace(2) global <4 x i32>, align 4
+// CHECK: @int64_t2_Val = external hidden addrspace(2) global <2 x i64>, align 8
+// CHECK: @int64_t3_Val = external hidden addrspace(2) global <3 x i64>, align 8
+// CHECK: @int64_t4_Val = external hidden addrspace(2) global <4 x i64>, align 8
+// CHECK: @uint64_t2_Val = external hidden addrspace(2) global <2 x i64>, align 8
+// CHECK: @uint64_t3_Val = external hidden addrspace(2) global <3 x i64>, align 8
+// CHECK: @uint64_t4_Val = external hidden addrspace(2) global <4 x i64>, align 8
+// CHECK: @half2_Val = external hidden addrspace(2) global <2 x half>, align 2
+// CHECK: @half3_Val = external hidden addrspace(2) global <3 x half>, align 2
+// CHECK: @half4_Val = external hidden addrspace(2) global <4 x half>, align 2
+// CHECK: @float2_Val = external hidden addrspace(2) global <2 x float>, align 4
+// CHECK: @float3_Val = external hidden addrspace(2) global <3 x float>, align 4
+// CHECK: @float4_Val = external hidden addrspace(2) global <4 x float>, align 4
+// CHECK: @double2_Val = external hidden addrspace(2) global <2 x double>, align 8
+// CHECK: @double3_Val = external hidden addrspace(2) global <3 x double>, align 8
+// CHECK: @double4_Val = external hidden addrspace(2) global <4 x double>, align 8
 
 #ifdef NAMESPACED
 #define TYPE_DECL(T)  hlsl::T T##_Val
diff --git a/clang/test/CodeGenHLSL/builtins/AddUint64.hlsl b/clang/test/CodeGenHLSL/builtins/AddUint64.hlsl
index 8457ad6da293f..5ec6720c67313 100644
--- a/clang/test/CodeGenHLSL/builtins/AddUint64.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/AddUint64.hlsl
@@ -7,12 +7,12 @@
 // CHECK-LABEL: define hidden noundef <2 x i32> @_Z20test_AddUint64_uint2Dv2_jS_(
 // CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT:    store <2 x i32> [[A]], ptr [[A_ADDR]], align 8
-// CHECK-NEXT:    store <2 x i32> [[B]], ptr [[B_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT:    store <2 x i32> [[A]], ptr [[A_ADDR]], align 4
+// CHECK-NEXT:    store <2 x i32> [[B]], ptr [[B_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[LOWA:%.*]] = extractelement <2 x i32> [[TMP0]], i64 0
 // CHECK-NEXT:    [[HIGHA:%.*]] = extractelement <2 x i32> [[TMP0]], i64 1
 // CHECK-NEXT:    [[LOWB:%.*]] = extractelement <2 x i32> [[TMP1]], i64 0
@@ -34,12 +34,12 @@ uint2 test_AddUint64_uint2(uint2 a, uint2 b) {
 // CHECK-LABEL: define hidden noundef <4 x i32> @_Z20test_AddUint64_uint4Dv4_jS_(
 // CHECK-SAME: <4 x i32> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <4 x i32>, align 16
-// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <4 x i32>, align 16
-// CHECK-NEXT:    store <4 x i32> [[A]], ptr [[A_ADDR]], align 16
-// CHECK-NEXT:    store <4 x i32> [[B]], ptr [[B_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A_ADDR]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[B_ADDR]], align 16
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <4 x i32>, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <4 x i32>, align 4
+// CHECK-NEXT:    store <4 x i32> [[A]], ptr [[A_ADDR]], align 4
+// CHECK-NEXT:    store <4 x i32> [[B]], ptr [[B_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[B_ADDR]], align 4
 // CHECK-NEXT:    [[LOWA:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
 // CHECK-NEXT:    [[HIGHA:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <2 x i32> <i32 1, i32 3>
 // CHECK-NEXT:    [[LOWB:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
diff --git a/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl b/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl
index 270598265c660..484c3ba79cf8b 100644
--- a/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/ScalarSwizzles.hlsl
@@ -119,12 +119,12 @@ bool2 FillTrue() {
 
 // CHECK-LABEL: HowManyFloats
 // CHECK: [[VAddr:%.*]] = alloca float, align 4
-// CHECK: [[vec2Ptr:%.*]] = alloca <2 x float>, align 8
+// CHECK: [[vec2Ptr:%.*]] = alloca <2 x float>, align 4
 // CHECK: [[VVal:%.*]] = load float, ptr [[VAddr]], align 4
 // CHECK: [[splat:%.*]] = insertelement <1 x float> poison, float [[VVal]], i64 0
 // CHECK: [[vec2:%.*]] = shufflevector <1 x float> [[splat]], <1 x float> poison, <2 x i32> zeroinitializer
-// CHECK: store <2 x float> [[vec2]], ptr [[vec2Ptr]], align 8
-// CHECK: [[vec2:%.*]] = load <2 x float>, ptr [[vec2Ptr]], align 8
+// CHECK: store <2 x float> [[vec2]], ptr [[vec2Ptr]], align 4
+// CHECK: [[vec2:%.*]] = load <2 x float>, ptr [[vec2Ptr]], align 4
 // CHECK: [[vec2Res:%.*]] = shufflevector <2 x float> [[vec2]], <2 x float> poison, <2 x i32> zeroinitializer
 // CHECK: ret <2 x float> [[vec2Res]]
 float2 HowManyFloats(float V) {
@@ -133,7 +133,7 @@ float2 HowManyFloats(float V) {
 
 // CHECK-LABEL: HowManyBools
 // CHECK: [[VAddr:%.*]] = alloca i32, align 4
-// CHECK-NEXT: [[Vec2Ptr:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[Vec2Ptr:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = zext i1 {{.*}} to i32
 // CHECK-NEXT: store i32 [[Tmp]], ptr [[VAddr]], align 4
 // CHECK-NEXT: [[VVal:%.*]] = load i32, ptr [[VAddr]], align 4
@@ -141,8 +141,8 @@ float2 HowManyFloats(float V) {
 // CHECK-NEXT: [[Vec2:%.*]] = shufflevector <1 x i32> [[Splat]], <1 x i32> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT: [[Trunc:%.*]] = trunc <2 x i32> [[Vec2]] to <2 x i1>
 // CHECK-NEXT: [[Ext:%.*]] = zext <2 x i1> [[Trunc]] to <2 x i32>
-// CHECK-NEXT: store <2 x i32> [[Ext]], ptr [[Vec2Ptr]], align 8
-// CHECK-NEXT: [[V2:%.*]] = load <2 x i32>, ptr [[Vec2Ptr]], align 8
+// CHECK-NEXT: store <2 x i32> [[Ext]], ptr [[Vec2Ptr]], align 4
+// CHECK-NEXT: [[V2:%.*]] = load <2 x i32>, ptr [[Vec2Ptr]], align 4
 // CHECK-NEXT: [[V3:%.*]] = shufflevector <2 x i32> [[V2]], <2 x i32> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT: [[LV1:%.*]] = trunc <2 x i32> [[V3]] to <2 x i1>
 // CHECK-NEXT: ret <2 x i1> [[LV1]]
@@ -246,7 +246,7 @@ bool AssignBool(bool V) {
 
 // CHECK-LABEL: AssignBool2
 // CHECK: [[VAdddr:%.*]] = alloca i32, align 4
-// CHECK-NEXT: [[X:%.*]] = alloca <2 x i32>, align 8
+// CHECK-NEXT: [[X:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = alloca <1 x i32>, align 4
 // CHECK-NEXT: [[SV:%.*]] = zext i1 %V to i32
 // CHECK-NEXT: store i32 [[SV]], ptr [[VAddr]], align 4
@@ -255,7 +255,7 @@ bool AssignBool(bool V) {
 // CHECK-NEXT: [[Z:%.*]] = shufflevector <1 x i32> [[Y]], <1 x i32> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT: [[LV:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
 // CHECK-NEXT: [[A:%.*]] = zext <2 x i1> [[LV]] to <2 x i32>
-// CHECK-NEXT: store <2 x i32> [[A]], ptr [[X]], align 8
+// CHECK-NEXT: store <2 x i32> [[A]], ptr [[X]], align 4
 // CHECK-NEXT: [[B:%.*]] = load i32, ptr [[VAddr]], align 4
 // CHECK-NEXT: [[LV1:%.*]] = trunc i32 [[B]] to i1
 // CHECK-NEXT: [[D:%.*]] = zext i1 [[LV1]] to i32
@@ -268,12 +268,12 @@ void AssignBool2(bool V) {
 }
 
 // CHECK-LABEL: AssignBool3
-// CHECK: [[VAddr:%.*]] = alloca <2 x i32>, align 8
-// CHECK-NEXT: [[X:%.*]] = alloca <2 x i32>, align 8
+// CHECK: [[VAddr:%.*]] = alloca <2 x i32>, align 4
+// CHECK-NEXT: [[X:%.*]] = alloca <2 x i32>, align 4
 // CHECK-NEXT: [[Y:%.*]] = zext <2 x i1> %V to <2 x i32>
-// CHECK-NEXT: store <2 x i32> [[Y]], ptr [[VAddr]], align 8
-// CHECK-NEXT: store <2 x i32> splat (i32 1), ptr [[X]], align 8
-// CHECK-NEXT: [[Z:%.*]] = load <2 x i32>, ptr [[VAddr]], align 8
+// CHECK-NEXT: store <2 x i32> [[Y]], ptr [[VAddr]], align 4
+// CHECK-NEXT: store <2 x i32> splat (i32 1), ptr [[X]], align 4
+// CHECK-NEXT: [[Z:%.*]] = load <2 x i32>, ptr [[VAddr]], align 4
 // CHECK-NEXT: [[LV:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
 // CHECK-NEXT: [[B:%.*]] = zext <2 x i1> [[LV]] to <2 x i32>
 // CHECK-NEXT: [[V1:%.*]] = extractelement <2 x i32> [[B]], i32 0
@@ -289,15 +289,15 @@ void AssignBool3(bool2 V) {
 }
 
 // CHECK-LABEL: AccessBools
-// CHECK: [[X:%.*]] = alloca <4 x i32>, align 16
+// CHECK: [[X:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = alloca <1 x i32>, align 4
 // CHECK-NEXT: store <1 x i32> splat (i32 1), ptr [[Tmp]], align 4
 // CHECK-NEXT: [[Y:%.*]] = load <1 x i32>, ptr [[Tmp]], align 4
 // CHECK-NEXT: [[Z:%.*]] = shufflevector <1 x i32> [[Y]], <1 x i32> poison, <4 x i32> zeroinitializer
 // CHECK-NEXT: [[LV:%.*]] = trunc <4 x i32> [[Z]] to <4 x i1>
 // CHECK-NEXT: [[A:%.*]] = zext <4 x i1> [[LV]] to <4 x i32>
-// CHECK-NEXT: store <4 x i32> [[A]], ptr [[X]], align 16
-// CHECK-NEXT: [[B:%.*]] = load <4 x i32>, ptr [[X]], align 16
+// CHECK-NEXT: store <4 x i32> [[A]], ptr [[X]], align 4
+// CHECK-NEXT: [[B:%.*]] = load <4 x i32>, ptr [[X]], align 4
 // CHECK-NEXT: [[C:%.*]] = shufflevector <4 x i32> [[B]], <4 x i32> poison, <2 x i32> <i32 2, i32 3>
 // CHECK-NEXT: [[LV1:%.*]] = trunc <2 x i32> [[C]] to <2 x i1>
 // CHECK-NEXT: ret <2 x i1> [[LV1]]
@@ -307,9 +307,9 @@ bool2 AccessBools() {
 }
 
 // CHECK-LABEL: define hidden void {{.*}}BoolSizeMismatch{{.*}}
-// CHECK: [[B:%.*]] = alloca <4 x i32>, align 16
+// CHECK: [[B:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT: [[Tmp:%.*]] = alloca <1 x i32>, align 4
-// CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[B]], align 16
+// CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[B]], align 4
 // CHECK-NEXT: store <1 x i32> zeroinitializer, ptr [[Tmp]], align 4
 // CHECK-NEXT: [[L0:%.*]] = load <1 x i32>, ptr [[Tmp]], align 4
 // CHECK-NEXT: [[L1:%.*]] = shufflevector <1 x i32> [[L0]], <1 x i32> poison, <3 x i32> zeroinitializer
diff --git a/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl b/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl
index c632e795098ea..06645f2294cde 100644
--- a/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/VectorSwizzles.hlsl
@@ -5,9 +5,9 @@
 // CHECK-LABEL: Single
 
 // Setup local vars.
-// CHECK: [[VecAddr:%.*]] = alloca <3 x i64>, align 32
+// CHECK: [[VecAddr:%.*]] = alloca <3 x i64>, align 8
 // CHECK-NEXT: [[AAddr:%.*]] = alloca i64, align 8
-// CHECK-NEXT: store <3 x i64> %vec, ptr [[VecAddr]], align 32
+// CHECK-NEXT: store <3 x i64> %vec, ptr [[VecAddr]], align 8
 // CHECK-NEXT: store i64 %a, ptr [[AAddr]], align 8
 
 // Update single element of the vector.
@@ -16,7 +16,7 @@
 // CHECK-NEXT: store i64 [[A]], ptr [[Vy]], align 8
 
 // Return.
-// CHECK-NEXT: [[RetVal:%.*]] = load <3 x i64>, ptr [[VecAddr]], align 32
+// CHECK-NEXT: [[RetVal:%.*]] = load <3 x i64>, ptr [[VecAddr]], align 8
 // CHECK-NEXT: ret <3 x i64> [[RetVal]]
 uint64_t3 Single(uint64_t3 vec, uint64_t a){
     vec.y = a;
@@ -26,10 +26,10 @@ uint64_t3 Single(uint64_t3 vec, uint64_t a){
 // CHECK-LABEL: Double
 
 // Setup local vars.
-// CHECK: [[VecAddr:%.*]] = alloca <3 x float>, align 16
+// CHECK: [[VecAddr:%.*]] = alloca <3 x float>, align 4
 // CHECK-NEXT: [[AAddr:%.*]] = alloca float, align 4
 // CHECK-NEXT: [[BAddr:%.*]] = alloca float, align 4
-// CHECK-NEXT: store <3 x float> %vec, ptr [[VecAddr]], align 16
+// CHECK-NEXT: store <3 x float> %vec, ptr [[VecAddr]], align 4
 // CHECK-NEXT: store float %a, ptr [[AAddr]], align 4
 // CHECK-NEXT: store float %b, ptr [[BAddr]], align 4
 
@@ -48,7 +48,7 @@ uint64_t3 Single(uint64_t3 vec, uint64_t a){
 // CHECK-NEXT: store float [[TmpY]], ptr [[VecY]], align 4
 
 // Return.
-// CHECK-NEXT: [[RetVal:%.*]] = load <3 x float>, ptr [[VecAddr]], align 16
+// CHECK-NEXT: [[RetVal:%.*]] = load <3 x float>, ptr [[VecAddr]], align 4
 // CHECK-NEXT: ret <3 x float> [[RetVal]]
 float3 Double(float3 vec, float a, float b) {
     vec.zy = {a, b};
@@ -58,10 +58,10 @@ float3 Double(float3 vec, float a, float b) {
 // CHECK-LABEL: Shuffle
 
 // Setup local vars.
-// CHECK: [[VecAddr:%.*]] = alloca <4 x half>, align 8
+// CHECK: [[VecAddr:%.*]] = alloca <4 x half>, align 2
 // CHECK-NEXT: [[AAddr:%.*]] = alloca half, align 2
 // CHECK-NEXT: [[BAddr:%.*]] = alloca half, align 2
-// CHECK-NEXT: store <4 x half> %vec, ptr [[VecAddr]], align 8
+// CHECK-NEXT: store <4 x half> %vec, ptr [[VecAddr]], align 2
 // CHECK-NEXT: store half %a, ptr [[AAddr]], align 2
 // CHECK-NEXT: store half %b, ptr [[BAddr]], align 2
 
@@ -88,7 +88,7 @@ float3 Double(float3 vec, float a, float b) {
 // CHECK-NEXT: store half [[TmpW]], ptr [[VecY]], align 2
 
 // Return.
-// CHECK-NEXT: [[RetVal:%.*]] = load <4 x half>, ptr [[VecAddr]], align 8
+// CHECK-NEXT: [[RetVal:%.*]] = load <4 x half>, ptr [[VecAddr]], align 2
 // CHECK-NEXT: ret <4 x half> [[RetVal]]
 half4 Shuffle(half4 vec, half a, half b) {
     vec.zwxy = {a, b, 13.74, a};
diff --git a/clang/test/CodeGenHLSL/builtins/clip.hlsl b/clang/test/CodeGenHLSL/builtins/clip.hlsl
index bb21f084deba5..2ab14641d0299 100644
--- a/clang/test/CodeGenHLSL/builtins/clip.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/clip.hlsl
@@ -4,13 +4,13 @@
 
 void test_scalar(float Buf) {
   // CHECK:      define hidden void @{{.*}}test_scalar{{.*}}(float {{.*}} [[VALP:%.*]])
-  // CHECK:      [[LOAD:%.*]] = load float, ptr [[VALP]].addr, align 4
+  // CHECK:      [[LOAD:%.*]] = load float, ptr [[VALP]].addr
   // CHECK-NEXT: [[FCMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn olt float [[LOAD]], 0.000000e+00
   // CHECK-NO:   call i1 @llvm.dx.any
   // CHECK-NEXT: call void @llvm.dx.discard(i1 [[FCMP]])
   //
   // SPIRV:      define hidden spir_func void @{{.*}}test_scalar{{.*}}(float {{.*}} [[VALP:%.*]])
-  // SPIRV:      [[LOAD:%.*]] = load float, ptr [[VALP]].addr, align 4
+  // SPIRV:      [[LOAD:%.*]] = load float, ptr [[VALP]].addr
   // SPIRV-NEXT: [[FCMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn olt float [[LOAD]], 0.000000e+00
   // SPIRV-NO:   call i1 @llvm.spv.any
   // SPIRV-NEXT: br i1 [[FCMP]], label %[[LTL:.*]], label %[[ENDL:.*]]
@@ -22,15 +22,15 @@ void test_scalar(float Buf) {
 
 void test_vector4(float4 Buf) {
   // CHECK:      define hidden void @{{.*}}test_vector{{.*}}(<4 x float> {{.*}} [[VALP:%.*]])
-  // CHECK:      [[LOAD:%.*]] = load <4 x float>, ptr [[VALP]].addr, align 16
+  // CHECK:      [[LOAD:%.*]] = load <4 x float>, ptr [[VALP]].addr
   // CHECK-NEXT: [[FCMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn olt <4 x float> [[LOAD]], zeroinitializer
   // CHECK-NEXT: [[ANYC:%.*]] = call i1 @llvm.dx.any.v4i1(<4 x i1> [[FCMP]])
   // CHECK-NEXT: call void @llvm.dx.discard(i1 [[ANYC]])
   //
   // SPIRV:      define hidden spir_func void @{{.*}}test_vector{{.*}}(<4 x float> {{.*}} [[VALP:%.*]])
-  // SPIRV:      [[LOAD:%.*]] = load <4 x float>, ptr [[VALP]].addr, align 16
+  // SPIRV:      [[LOAD:%.*]] = load <4 x float>, ptr [[VALP]].addr
   // SPIRV-NEXT: [[FCMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn olt <4 x float> [[LOAD]], zeroinitializer
-  // SPIRV-NEXT: [[ANYC:%.*]] = call i1 @llvm.spv.any.v4i1(<4 x i1> [[FCMP]]) 
+  // SPIRV-NEXT: [[ANYC:%.*]] = call i1 @llvm.spv.any.v4i1(<4 x i1> [[FCMP]])
   // SPIRV-NEXT: br i1 [[ANYC]], label %[[LTL:.*]], label %[[ENDL:.*]]
   // SPIRV:      [[LTL]]: ; preds = %entry
   // SPIRV-NEXT: call void @llvm.spv.discard()
diff --git a/clang/test/CodeGenHLSL/builtins/mad.hlsl b/clang/test/CodeGenHLSL/builtins/mad.hlsl
index 1116c1419997d..f501445415647 100644
--- a/clang/test/CodeGenHLSL/builtins/mad.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/mad.hlsl
@@ -64,106 +64,106 @@ int16_t3 test_mad_int16_t3(int16_t3 p0, int16_t3 p1, int16_t3 p2) { return mad(p
 int16_t4 test_mad_int16_t4(int16_t4 p0, int16_t4 p1, int16_t4 p2) { return mad(p0, p1, p2); }
 #endif // __HLSL_ENABLE_16_BIT
 
-// NATIVE_HALF: %[[p0:.*]] = load half, ptr %p0.addr, align 2
-// NATIVE_HALF: %[[p1:.*]] = load half, ptr %p1.addr, align 2
-// NATIVE_HALF: %[[p2:.*]] = load half, ptr %p2.addr, align 2
+// NATIVE_HALF: %[[p0:.*]] = load half, ptr %p0.addr
+// NATIVE_HALF: %[[p1:.*]] = load half, ptr %p1.addr
+// NATIVE_HALF: %[[p2:.*]] = load half, ptr %p2.addr
 // NATIVE_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn half @llvm.fmuladd.f16(half %[[p0]], half %[[p1]], half %[[p2]])
 // NATIVE_HALF: ret half %hlsl.fmad
-// NO_HALF: %[[p0:.*]] = load float, ptr %p0.addr, align 4
-// NO_HALF: %[[p1:.*]] = load float, ptr %p1.addr, align 4
-// NO_HALF: %[[p2:.*]] = load float, ptr %p2.addr, align 4
+// NO_HALF: %[[p0:.*]] = load float, ptr %p0.addr
+// NO_HALF: %[[p1:.*]] = load float, ptr %p1.addr
+// NO_HALF: %[[p2:.*]] = load float, ptr %p2.addr
 // NO_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn float @llvm.fmuladd.f32(float %[[p0]], float %[[p1]], float %[[p2]])
 // NO_HALF: ret float %hlsl.fmad
 half test_mad_half(half p0, half p1, half p2) { return mad(p0, p1, p2); }
 
-// NATIVE_HALF: %[[p0:.*]] = load <2 x half>, ptr %p0.addr, align 4
-// NATIVE_HALF: %[[p1:.*]] = load <2 x half>, ptr %p1.addr, align 4
-// NATIVE_HALF: %[[p2:.*]] = load <2 x half>, ptr %p2.addr, align 4
+// NATIVE_HALF: %[[p0:.*]] = load <2 x half>, ptr %p0.addr
+// NATIVE_HALF: %[[p1:.*]] = load <2 x half>, ptr %p1.addr
+// NATIVE_HALF: %[[p2:.*]] = load <2 x half>, ptr %p2.addr
 // NATIVE_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <2 x half>  @llvm.fmuladd.v2f16(<2 x half> %[[p0]], <2 x half> %[[p1]], <2 x half> %[[p2]])
 // NATIVE_HALF: ret <2 x half> %hlsl.fmad
-// NO_HALF: %[[p0:.*]] = load <2 x float>, ptr %p0.addr, align 8
-// NO_HALF: %[[p1:.*]] = load <2 x float>, ptr %p1.addr, align 8
-// NO_HALF: %[[p2:.*]] = load <2 x float>, ptr %p2.addr, align 8
+// NO_HALF: %[[p0:.*]] = load <2 x float>, ptr %p0.addr
+// NO_HALF: %[[p1:.*]] = load <2 x float>, ptr %p1.addr
+// NO_HALF: %[[p2:.*]] = load <2 x float>, ptr %p2.addr
 // NO_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <2 x float>  @llvm.fmuladd.v2f32(<2 x float> %[[p0]], <2 x float> %[[p1]], <2 x float> %[[p2]])
 // NO_HALF: ret <2 x float> %hlsl.fmad
 half2 test_mad_half2(half2 p0, half2 p1, half2 p2) { return mad(p0, p1, p2); }
 
-// NATIVE_HALF: %[[p0:.*]] = load <3 x half>, ptr %p0.addr, align 8
-// NATIVE_HALF: %[[p1:.*]] = load <3 x half>, ptr %p1.addr, align 8
-// NATIVE_HALF: %[[p2:.*]] = load <3 x half>, ptr %p2.addr, align 8
+// NATIVE_HALF: %[[p0:.*]] = load <3 x half>, ptr %p0.addr
+// NATIVE_HALF: %[[p1:.*]] = load <3 x half>, ptr %p1.addr
+// NATIVE_HALF: %[[p2:.*]] = load <3 x half>, ptr %p2.addr
 // NATIVE_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <3 x half>  @llvm.fmuladd.v3f16(<3 x half> %[[p0]], <3 x half> %[[p1]], <3 x half> %[[p2]])
 // NATIVE_HALF: ret <3 x half> %hlsl.fmad
-// NO_HALF: %[[p0:.*]] = load <3 x float>, ptr %p0.addr, align 16
-// NO_HALF: %[[p1:.*]] = load <3 x float>, ptr %p1.addr, align 16
-// NO_HALF: %[[p2:.*]] = load <3 x float>, ptr %p2.addr, align 16
+// NO_HALF: %[[p0:.*]] = load <3 x float>, ptr %p0.addr
+// NO_HALF: %[[p1:.*]] = load <3 x float>, ptr %p1.addr
+// NO_HALF: %[[p2:.*]] = load <3 x float>, ptr %p2.addr
 // NO_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <3 x float>  @llvm.fmuladd.v3f32(<3 x float> %[[p0]], <3 x float> %[[p1]], <3 x float> %[[p2]])
 // NO_HALF: ret <3 x float> %hlsl.fmad
 half3 test_mad_half3(half3 p0, half3 p1, half3 p2) { return mad(p0, p1, p2); }
 
-// NATIVE_HALF: %[[p0:.*]] = load <4 x half>, ptr %p0.addr, align 8
-// NATIVE_HALF: %[[p1:.*]] = load <4 x half>, ptr %p1.addr, align 8
-// NATIVE_HALF: %[[p2:.*]] = load <4 x half>, ptr %p2.addr, align 8
+// NATIVE_HALF: %[[p0:.*]] = load <4 x half>, ptr %p0.addr
+// NATIVE_HALF: %[[p1:.*]] = load <4 x half>, ptr %p1.addr
+// NATIVE_HALF: %[[p2:.*]] = load <4 x half>, ptr %p2.addr
 // NATIVE_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <4 x half>  @llvm.fmuladd.v4f16(<4 x half> %[[p0]], <4 x half> %[[p1]], <4 x half> %[[p2]])
 // NATIVE_HALF: ret <4 x half> %hlsl.fmad
-// NO_HALF: %[[p0:.*]] = load <4 x float>, ptr %p0.addr, align 16
-// NO_HALF: %[[p1:.*]] = load <4 x float>, ptr %p1.addr, align 16
-// NO_HALF: %[[p2:.*]] = load <4 x float>, ptr %p2.addr, align 16
+// NO_HALF: %[[p0:.*]] = load <4 x float>, ptr %p0.addr
+// NO_HALF: %[[p1:.*]] = load <4 x float>, ptr %p1.addr
+// NO_HALF: %[[p2:.*]] = load <4 x float>, ptr %p2.addr
 // NO_HALF: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <4 x float>  @llvm.fmuladd.v4f32(<4 x float> %[[p0]], <4 x float> %[[p1]], <4 x float> %[[p2]])
 // NO_HALF: ret <4 x float> %hlsl.fmad
 half4 test_mad_half4(half4 p0, half4 p1, half4 p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load float, ptr %p0.addr, align 4
-// CHECK: %[[p1:.*]] = load float, ptr %p1.addr, align 4
-// CHECK: %[[p2:.*]] = load float, ptr %p2.addr, align 4
+// CHECK: %[[p0:.*]] = load float, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load float, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load float, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn float @llvm.fmuladd.f32(float %[[p0]], float %[[p1]], float %[[p2]])
 // CHECK: ret float %hlsl.fmad
 float test_mad_float(float p0, float p1, float p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load <2 x float>, ptr %p0.addr, align 8
-// CHECK: %[[p1:.*]] = load <2 x float>, ptr %p1.addr, align 8
-// CHECK: %[[p2:.*]] = load <2 x float>, ptr %p2.addr, align 8
+// CHECK: %[[p0:.*]] = load <2 x float>, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load <2 x float>, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load <2 x float>, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <2 x float>  @llvm.fmuladd.v2f32(<2 x float> %[[p0]], <2 x float> %[[p1]], <2 x float> %[[p2]])
 // CHECK: ret <2 x float> %hlsl.fmad
 float2 test_mad_float2(float2 p0, float2 p1, float2 p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load <3 x float>, ptr %p0.addr, align 16
-// CHECK: %[[p1:.*]] = load <3 x float>, ptr %p1.addr, align 16
-// CHECK: %[[p2:.*]] = load <3 x float>, ptr %p2.addr, align 16
+// CHECK: %[[p0:.*]] = load <3 x float>, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load <3 x float>, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load <3 x float>, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <3 x float>  @llvm.fmuladd.v3f32(<3 x float> %[[p0]], <3 x float> %[[p1]], <3 x float> %[[p2]])
 // CHECK: ret <3 x float> %hlsl.fmad
 float3 test_mad_float3(float3 p0, float3 p1, float3 p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load <4 x float>, ptr %p0.addr, align 16
-// CHECK: %[[p1:.*]] = load <4 x float>, ptr %p1.addr, align 16
-// CHECK: %[[p2:.*]] = load <4 x float>, ptr %p2.addr, align 16
+// CHECK: %[[p0:.*]] = load <4 x float>, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load <4 x float>, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load <4 x float>, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <4 x float>  @llvm.fmuladd.v4f32(<4 x float> %[[p0]], <4 x float> %[[p1]], <4 x float> %[[p2]])
 // CHECK: ret <4 x float> %hlsl.fmad
 float4 test_mad_float4(float4 p0, float4 p1, float4 p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load double, ptr %p0.addr, align 8
-// CHECK: %[[p1:.*]] = load double, ptr %p1.addr, align 8
-// CHECK: %[[p2:.*]] = load double, ptr %p2.addr, align 8
+// CHECK: %[[p0:.*]] = load double, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load double, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load double, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn double @llvm.fmuladd.f64(double %[[p0]], double %[[p1]], double %[[p2]])
 // CHECK: ret double %hlsl.fmad
 double test_mad_double(double p0, double p1, double p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load <2 x double>, ptr %p0.addr, align 16
-// CHECK: %[[p1:.*]] = load <2 x double>, ptr %p1.addr, align 16
-// CHECK: %[[p2:.*]] = load <2 x double>, ptr %p2.addr, align 16
+// CHECK: %[[p0:.*]] = load <2 x double>, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load <2 x double>, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load <2 x double>, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <2 x double>  @llvm.fmuladd.v2f64(<2 x double> %[[p0]], <2 x double> %[[p1]], <2 x double> %[[p2]])
 // CHECK: ret <2 x double> %hlsl.fmad
 double2 test_mad_double2(double2 p0, double2 p1, double2 p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load <3 x double>, ptr %p0.addr, align 32
-// CHECK: %[[p1:.*]] = load <3 x double>, ptr %p1.addr, align 32
-// CHECK: %[[p2:.*]] = load <3 x double>, ptr %p2.addr, align 32
+// CHECK: %[[p0:.*]] = load <3 x double>, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load <3 x double>, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load <3 x double>, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <3 x double>  @llvm.fmuladd.v3f64(<3 x double> %[[p0]], <3 x double> %[[p1]], <3 x double> %[[p2]])
 // CHECK: ret <3 x double> %hlsl.fmad
 double3 test_mad_double3(double3 p0, double3 p1, double3 p2) { return mad(p0, p1, p2); }
 
-// CHECK: %[[p0:.*]] = load <4 x double>, ptr %p0.addr, align 32
-// CHECK: %[[p1:.*]] = load <4 x double>, ptr %p1.addr, align 32
-// CHECK: %[[p2:.*]] = load <4 x double>, ptr %p2.addr, align 32
+// CHECK: %[[p0:.*]] = load <4 x double>, ptr %p0.addr
+// CHECK: %[[p1:.*]] = load <4 x double>, ptr %p1.addr
+// CHECK: %[[p2:.*]] = load <4 x double>, ptr %p2.addr
 // CHECK: %hlsl.fmad = call reassoc nnan ninf nsz arcp afn <4 x double>  @llvm.fmuladd.v4f64(<4 x double> %[[p0]], <4 x double> %[[p1]], <4 x double> %[[p2]])
 // CHECK: ret <4 x double> %hlsl.fmad
 double4 test_mad_double4(double4 p0, double4 p1, double4 p2) { return mad(p0, p1, p2); }
diff --git a/clang/test/CodeGenHLSL/builtins/select.hlsl b/clang/test/CodeGenHLSL/builtins/select.hlsl
index e5169844cb3f2..dd74589af30c6 100644
--- a/clang/test/CodeGenHLSL/builtins/select.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/select.hlsl
@@ -85,7 +85,7 @@ int4 test_select_vector_scalar_scalar(bool4 cond0, int tVal, int fVal) {
 }
 
 // CHECK-LABEL: test_select_nonbool_cond_vector_4
-// CHECK: [[TMP0:%.*]] = load <4 x i32>, ptr %cond0.addr, align 16
+// CHECK: [[TMP0:%.*]] = load <4 x i32>, ptr %cond0.addr, align 4
 // CHECK: [[TOBOOL:%.*]] = icmp ne <4 x i32> [[TMP0]], zeroinitializer
 // CHECK: [[SELECT:%.*]] = select <4 x i1> [[TOBOOL]], <4 x i1> {{%.*}}, <4 x i1> {{%.*}}
 // CHECK: ret <4 x i1> [[SELECT]]
@@ -94,7 +94,7 @@ bool4 test_select_nonbool_cond_vector_4(int4 cond0, bool4 tVal, bool4 fVal) {
 }
 
 // CHECK-LABEL: test_select_nonbool_cond_vector_scalar_vector
-// CHECK: [[TMP0:%.*]] = load <3 x i32>, ptr %cond0.addr, align 16
+// CHECK: [[TMP0:%.*]] = load <3 x i32>, ptr %cond0.addr, align 4
 // CHECK: [[TOBOOL:%.*]] = icmp ne <3 x i32> [[TMP0]], zeroinitializer
 // CHECK: [[SPLAT_SRC1:%.*]] = insertelement <3 x i32> poison, i32 {{%.*}}, i64 0
 // CHECK: [[SPLAT1:%.*]] = shufflevector <3 x i32> [[SPLAT_SRC1]], <3 x i32> poison, <3 x i32> zeroinitializer
@@ -105,7 +105,7 @@ int3 test_select_nonbool_cond_vector_scalar_vector(int3 cond0, int tVal, int3 fV
 }
 
 // CHECK-LABEL: test_select_nonbool_cond_vector_vector_scalar
-// CHECK: [[TMP0:%.*]] = load <2 x i32>, ptr %cond0.addr, align 8
+// CHECK: [[TMP0:%.*]] = load <2 x i32>, ptr %cond0.addr, align 4
 // CHECK: [[TOBOOL:%.*]] = icmp ne <2 x i32> [[TMP0]], zeroinitializer
 // CHECK: [[SPLAT_SRC1:%.*]] = insertelement <2 x i32> poison, i32 {{%.*}}, i64 0
 // CHECK: [[SPLAT1:%.*]] = shufflevector <2 x i32> [[SPLAT_SRC1]], <2 x i32> poison, <2 x i32> zeroinitializer
@@ -116,7 +116,7 @@ int2 test_select_nonbool_cond_vector_vector_scalar(int2 cond0, int2 tVal, int fV
 }
 
 // CHECK-LABEL: test_select_nonbool_cond_vector_scalar_scalar
-// CHECK: [[TMP0:%.*]] = load <4 x i32>, ptr %cond0.addr, align 16
+// CHECK: [[TMP0:%.*]] = load <4 x i32>, ptr %cond0.addr, align 4
 // CHECK: [[TOBOOL:%.*]] = icmp ne <4 x i32> [[TMP0]], zeroinitializer
 // CHECK: [[SPLAT_SRC1:%.*]] = insertelement <4 x i32> poison, i32 {{%.*}}, i64 0
 // CHECK: [[SPLAT1:%.*]] = shufflevector <4 x i32> [[SPLAT_SRC1]], <4 x i32> poison, <4 x i32> zeroinitializer
diff --git a/clang/test/CodeGenHLSL/float3.hlsl b/clang/test/CodeGenHLSL/float3.hlsl
index 4abd18713e718..d580bef1c1b1d 100644
--- a/clang/test/CodeGenHLSL/float3.hlsl
+++ b/clang/test/CodeGenHLSL/float3.hlsl
@@ -4,9 +4,9 @@
 
 // Make sure float3 is not changed into float4.
 // CHECK:<3 x float> @_Z3fooDv3_f(<3 x float> noundef nofpclass(nan inf) %[[PARAM:[0-9a-zA-Z]+]])
-// CHECK:%[[A_ADDR:.+]] = alloca <3 x float>, align 16
-// CHECK-NEXT:store <3 x float> %[[PARAM]], ptr %[[A_ADDR]], align 16
-// CHECK-NEXT:%[[V:[0-9]+]] = load <3 x float>, ptr %[[A_ADDR]], align 16
+// CHECK:%[[A_ADDR:.+]] = alloca <3 x float>, align 4
+// CHECK-NEXT:store <3 x float> %[[PARAM]], ptr %[[A_ADDR]], align 4
+// CHECK-NEXT:%[[V:[0-9]+]] = load <3 x float>, ptr %[[A_ADDR]], align 4
 // CHECK-NEXT:ret <3 x float> %[[V]]
 float3 foo(float3 a) {
   return a;
diff --git a/clang/test/CodeGenHLSL/groupsharedArgs/ArrTest.hlsl b/clang/test/CodeGenHLSL/groupsharedArgs/ArrTest.hlsl
index 4989275e6ee6d..d7a1106f53a88 100644
--- a/clang/test/CodeGenHLSL/groupsharedArgs/ArrTest.hlsl
+++ b/clang/test/CodeGenHLSL/groupsharedArgs/ArrTest.hlsl
@@ -2,7 +2,7 @@
 
 groupshared float4 SharedArr[64];
 
-// CHECK-LABEL: define hidden void @_Z2fnRA64_U3AS3Dv4_ff(ptr addrspace(3) noundef align 16 dereferenceable(1024) %Arr, float noundef nofpclass(nan inf) %F)
+// CHECK-LABEL: define hidden void @_Z2fnRA64_U3AS3Dv4_ff(ptr addrspace(3) noundef align 4 dereferenceable(1024) %Arr, float noundef nofpclass(nan inf) %F)
 // CHECK: [[ArrAddr:%.*]] = alloca ptr addrspace(3), align 4
 // CHECK: [[FAddr:%.*]] = alloca float, align 4
 // CHECK: store ptr addrspace(3) %Arr, ptr [[ArrAddr]], align 4
@@ -12,7 +12,7 @@ groupshared float4 SharedArr[64];
 // CHECK: [[B:%.*]] = shufflevector <1 x float> [[Splat]], <1 x float> poison, <4 x i32> zeroinitializer
 // CHECK: [[C:%.*]] = load ptr addrspace(3), ptr [[ArrAddr]], align 4, !align !3
 // CHECK: [[ArrIdx:%.*]] = getelementptr inbounds [64 x <4 x float>], ptr addrspace(3) [[C]], i32 0, i32 5
-// CHECK: store <4 x float> [[B]], ptr addrspace(3) [[ArrIdx]], align 16
+// CHECK: store <4 x float> [[B]], ptr addrspace(3) [[ArrIdx]], align 4
 // CHECK: ret void
 void fn(groupshared float4 Arr[64], float F) {
   Arr[5] = F.xxxx;
diff --git a/clang/test/CodeGenHLSL/groupsharedArgs/Overloads.hlsl b/clang/test/CodeGenHLSL/groupsharedArgs/Overloads.hlsl
index 5576eed5b5043..bc753009c0706 100644
--- a/clang/test/CodeGenHLSL/groupsharedArgs/Overloads.hlsl
+++ b/clang/test/CodeGenHLSL/groupsharedArgs/Overloads.hlsl
@@ -17,7 +17,7 @@ void main() {
   fn2(11.xxxx);
 }
 
-// CHECK-LABEL: define hidden void @_Z2fnA2_Dv4_f(ptr noalias noundef align 16 %Arr)
+// CHECK-LABEL: define hidden void @_Z2fnA2_Dv4_f(ptr noalias noundef align 4 %Arr)
 void fn(inout float4 Arr[2]) {
   Arr[1] = 5.0.xxxx;
 }
@@ -27,12 +27,12 @@ void fn2(int4 Local) {
   int X = Local.y;
 }
 
-// CHECK-LABEL: define hidden void @_Z2fnRA2_U3AS3Dv4_f(ptr addrspace(3) noundef align 16 dereferenceable(32) %Arr)
+// CHECK-LABEL: define hidden void @_Z2fnRA2_U3AS3Dv4_f(ptr addrspace(3) noundef align 4 dereferenceable(32) %Arr)
 void fn(groupshared float4 Arr[2]) {
   Arr[1] = 7.0.xxxx;
 }
 
-// CHECK-LABEL: define hidden void @_Z3fn2RU3AS3Dv4_i(ptr addrspace(3) noundef align 16 dereferenceable(16) %Shared)
+// CHECK-LABEL: define hidden void @_Z3fn2RU3AS3Dv4_i(ptr addrspace(3) noundef align 4 dereferenceable(16) %Shared)
 void fn2(groupshared int4 Shared) {
   Shared.x = 10;
 }
diff --git a/clang/test/CodeGenHLSL/groupsharedArgs/TemplateTest.hlsl b/clang/test/CodeGenHLSL/groupsharedArgs/TemplateTest.hlsl
index 863a6f14d445b..d6faa846655ac 100644
--- a/clang/test/CodeGenHLSL/groupsharedArgs/TemplateTest.hlsl
+++ b/clang/test/CodeGenHLSL/groupsharedArgs/TemplateTest.hlsl
@@ -1,41 +1,41 @@
 // RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-compute -std=hlsl202x -emit-llvm -disable-llvm-passes -hlsl-entry main -o - %s | FileCheck %s
 
 // In the case the template type is specified the groupshared attribute is preserved in the type
-// CHECK-LABEL: define linkonce_odr hidden void @_Z4tfooIU3AS3Dv4_iEvT_S2_(ptr addrspace(3) noundef align 16 dereferenceable(16) %a, ptr addrspace(3) noundef align 16 dereferenceable(16) %b)
+// CHECK-LABEL: define linkonce_odr hidden void @_Z4tfooIU3AS3Dv4_iEvT_S2_(ptr addrspace(3) noundef align 4 dereferenceable(16) %a, ptr addrspace(3) noundef align 4 dereferenceable(16) %b)
 // CHECK: [[AAddr:%.*]] = alloca ptr addrspace(3), align 4
 // CHECK: [[BAddr:%.*]] = alloca ptr addrspace(3), align 4
 // CHECK: store ptr addrspace(3) %a, ptr [[AAddr]], align 4
 // CHECK: store ptr addrspace(3) %b, ptr [[BAddr]], align 4
 // CHECK: [[C:%.*]] = load ptr addrspace(3), ptr [[BAddr]], align 4
-// CHECK: [[D:%.*]] = load <4 x i32>, ptr addrspace(3) [[C]], align 16
+// CHECK: [[D:%.*]] = load <4 x i32>, ptr addrspace(3) [[C]], align 4
 // CHECK: [[E:%.*]] = load ptr addrspace(3), ptr [[AAddr]], align 4
-// CHECK: store <4 x i32> [[D]], ptr addrspace(3) [[E]], align 16
+// CHECK: store <4 x i32> [[D]], ptr addrspace(3) [[E]], align 4
 // CHECK: ret void
 
 // In the case the template type is deduced the deduction is done on the non cv-qualified type (the address space is removed)
 // So the non groupshared version of the function is deduced
 // CHECK-LABEL: define linkonce_odr hidden void @_Z4tfooIDv4_iEvT_S1_(<4 x i32> noundef %a, <4 x i32> noundef %b)
-// CHECK: [[AAddr:%.*]] = alloca <4 x i32>, align 16
-// CHECK: [[BAddr:%.*]] = alloca <4 x i32>, align 16
-// CHECK: store <4 x i32> %a, ptr [[AAddr]], align 16
-// CHECK: store <4 x i32> %b, ptr [[BAddr]], align 16
-// CHECK: [[C:%.*]] = load <4 x i32>, ptr [[BAddr]], align 16
-// CHECK: store <4 x i32> [[C]], ptr [[AAddr]], align 16
+// CHECK: [[AAddr:%.*]] = alloca <4 x i32>, align 4
+// CHECK: [[BAddr:%.*]] = alloca <4 x i32>, align 4
+// CHECK: store <4 x i32> %a, ptr [[AAddr]], align 4
+// CHECK: store <4 x i32> %b, ptr [[BAddr]], align 4
+// CHECK: [[C:%.*]] = load <4 x i32>, ptr [[BAddr]], align 4
+// CHECK: store <4 x i32> [[C]], ptr [[AAddr]], align 4
 // CHECK: ret void
 template<typename T>
 void tfoo(T a, T b) {
   a = b;
 }
 
-// CHECK-LABEL: define linkonce_odr hidden void @_Z5tfoo2IDv4_iEvRU3AS3T_S3_(ptr addrspace(3) noundef align 16 dereferenceable(16) %a, ptr addrspace(3) noundef align 16 dereferenceable(16) %b)
+// CHECK-LABEL: define linkonce_odr hidden void @_Z5tfoo2IDv4_iEvRU3AS3T_S3_(ptr addrspace(3) noundef align 4 dereferenceable(16) %a, ptr addrspace(3) noundef align 4 dereferenceable(16) %b)
 // CHECK: [[AAddr:%.*]] = alloca ptr addrspace(3), align 4
 // CHECK: [[BAddr:%.*]] = alloca ptr addrspace(3), align 4
 // CHECK: store ptr addrspace(3) %a, ptr [[AAddr]], align 4
 // CHECK: store ptr addrspace(3) %b, ptr [[BAddr]], align 4
 // CHECK: [[Z:%.*]] = load ptr addrspace(3), ptr [[BAddr]], align 4
-// CHECK: [[Y:%.*]] = load <4 x i32>, ptr addrspace(3) [[Z]], align 16
+// CHECK: [[Y:%.*]] = load <4 x i32>, ptr addrspace(3) [[Z]], align 4
 // CHECK: [[X:%.*]] = load ptr addrspace(3), ptr [[AAddr]], align 4
-// CHECK: store <4 x i32> [[Y]], ptr addrspace(3) [[X]], align 16
+// CHECK: store <4 x i32> [[Y]], ptr addrspace(3) [[X]], align 4
 // CHECK: ret void
 template<typename T>
 void tfoo2(groupshared T a, groupshared T b) {
diff --git a/clang/test/CodeGenHLSL/groupsharedArgs/VectorTest.hlsl b/clang/test/CodeGenHLSL/groupsharedArgs/VectorTest.hlsl
index e61f8d2cdf43b..c19397b6bc5fd 100644
--- a/clang/test/CodeGenHLSL/groupsharedArgs/VectorTest.hlsl
+++ b/clang/test/CodeGenHLSL/groupsharedArgs/VectorTest.hlsl
@@ -2,7 +2,7 @@
 
 groupshared float4 SharedData;
 
-// CHECK-LABEL: define hidden void @_Z3fn1RU3AS3Dv4_f(ptr addrspace(3) noundef align 16 dereferenceable(16) %Sh)
+// CHECK-LABEL: define hidden void @_Z3fn1RU3AS3Dv4_f(ptr addrspace(3) noundef align 4 dereferenceable(16) %Sh)
 // CHECK: [[ShAddr:%.*]] = alloca ptr addrspace(3), align 4
 // CHECK: [[Tmp:%.*]] = alloca <1 x float>, align 4
 // CHECK: store ptr addrspace(3) %Sh, ptr [[ShAddr]], align 4
@@ -10,7 +10,7 @@ groupshared float4 SharedData;
 // CHECK: [[A:%.*]] = load <1 x float>, ptr [[Tmp]], align 4
 // CHECK: [[B:%.*]] = shufflevector <1 x float> [[A]], <1 x float> poison, <4 x i32> zeroinitializer
 // CHECK: [[C:%.*]] = load ptr addrspace(3), ptr [[ShAddr]], align 4
-// CHECK: store <4 x float> [[B]], ptr addrspace(3) [[C]], align 16
+// CHECK: store <4 x float> [[B]], ptr addrspace(3) [[C]], align 4
 // CHECK: ret void
 void fn1(groupshared float4 Sh) {
   Sh = 5.0.xxxx;
diff --git a/clang/test/CodeGenHLSL/matrix-member-one-based-swizzle-store.hlsl b/clang/test/CodeGenHLSL/matrix-member-one-based-swizzle-store.hlsl
index 26b16d02a0d4a..d6a3744dd1128 100644
--- a/clang/test/CodeGenHLSL/matrix-member-one-based-swizzle-store.hlsl
+++ b/clang/test/CodeGenHLSL/matrix-member-one-based-swizzle-store.hlsl
@@ -35,10 +35,10 @@ void OnesSwizzleToScalar(out int4x4 A, int I) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(64) [[A:%.*]], <4 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
 // CHECK-NEXT:    store i32 [[TMP2]], ptr [[TMP1]], align 4
@@ -90,10 +90,10 @@ void TwosSwizzleToScalar(out int4x4 A, int I) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(64) [[A:%.*]], <4 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x i32>, ptr [[TMP1]], i32 0, i32 4
@@ -146,10 +146,10 @@ void ThreesSwizzleToScalar(out int4x4 A, int I) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(64) [[A:%.*]], <4 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x i32>, ptr [[TMP1]], i32 0, i32 8
@@ -202,10 +202,10 @@ void FoursSwizzleToScalar(out int4x4 A, int I) {
 // CHECK-SAME: ptr noalias noundef nonnull align 4 dereferenceable(64) [[A:%.*]], <4 x i32> noundef [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x i32>, align 4
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[V]], ptr [[V_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[V_ADDR]], align 4
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x i32>, ptr [[TMP1]], i32 0, i32 3
diff --git a/clang/test/CodeGenHLSL/matrix-member-zero-based-swizzle-store.hlsl b/clang/test/CodeGenHLSL/matrix-member-zero-based-swizzle-store.hlsl
index 986cdf136631b..001f47007579c 100644
--- a/clang/test/CodeGenHLSL/matrix-member-zero-based-swizzle-store.hlsl
+++ b/clang/test/CodeGenHLSL/matrix-member-zero-based-swizzle-store.hlsl
@@ -35,10 +35,10 @@ void ZerosSwizzleToScalar(out double4x4 A, double D) {
 // CHECK-SAME: ptr noalias noundef nonnull align 8 dereferenceable(128) [[A:%.*]], <4 x double> noundef nofpclass(nan inf) [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 8
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 32
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 32
+// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x double> [[TMP0]], i32 0
 // CHECK-NEXT:    store double [[TMP2]], ptr [[TMP1]], align 8
@@ -90,10 +90,10 @@ void OnesSwizzleToScalar(out double4x4 A, double D) {
 // CHECK-SAME: ptr noalias noundef nonnull align 8 dereferenceable(128) [[A:%.*]], <4 x double> noundef nofpclass(nan inf) [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 8
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 32
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 32
+// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x double> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x double>, ptr [[TMP1]], i32 0, i32 4
@@ -146,10 +146,10 @@ void TwosSwizzleToScalar(out double4x4 A, double D) {
 // CHECK-SAME: ptr noalias noundef nonnull align 8 dereferenceable(128) [[A:%.*]], <4 x double> noundef nofpclass(nan inf) [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 8
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 32
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 32
+// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x double> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x double>, ptr [[TMP1]], i32 0, i32 8
@@ -202,10 +202,10 @@ void ThreesSwizzleToScalar(out double4x4 A, double D) {
 // CHECK-SAME: ptr noalias noundef nonnull align 8 dereferenceable(128) [[A:%.*]], <4 x double> noundef nofpclass(nan inf) [[V:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
-// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[V_ADDR:%.*]] = alloca <4 x double>, align 8
 // CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
-// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 32
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 32
+// CHECK-NEXT:    store <4 x double> [[V]], ptr [[V_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[V_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META4]], !align [[META5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x double> [[TMP0]], i32 0
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <16 x double>, ptr [[TMP1]], i32 0, i32 12
diff --git a/clang/test/CodeGenHLSL/resources/cbuffer-empty-struct-array.hlsl b/clang/test/CodeGenHLSL/resources/cbuffer-empty-struct-array.hlsl
index 2e92d071202ae..72939cab07ac5 100644
--- a/clang/test/CodeGenHLSL/resources/cbuffer-empty-struct-array.hlsl
+++ b/clang/test/CodeGenHLSL/resources/cbuffer-empty-struct-array.hlsl
@@ -52,10 +52,10 @@ void main() {
 // CHECK-NOT: @a = external hidden addrspace(2) global
 // CHECK-NOT: @c = external hidden addrspace(2) global
 // CHECK2: @i = external hidden addrspace(2) global i32
-// CHECK2: @v = external hidden addrspace(2) global <2 x float>, align 8
+// CHECK2: @v = external hidden addrspace(2) global <2 x float>, align 4
 // CHECK2: @m = external hidden addrspace(2) global [4 x <4 x i32>], align 4
 // CHECK2: @"$Globals.cb" = global target("dx.CBuffer",
 // CHECK-NOT: @b = external hidden addrspace(2) global
 // CHECK2: @j = external hidden addrspace(2) global i32
-// CHECK2: @w = external hidden addrspace(2) global <2 x float>, align 8
+// CHECK2: @w = external hidden addrspace(2) global <2 x float>, align 4
 // CHECK2: @n = external hidden addrspace(2) global [4 x <4 x i32>], align 4
diff --git a/clang/test/CodeGenHLSL/resources/cbuffer.hlsl b/clang/test/CodeGenHLSL/resources/cbuffer.hlsl
index b72cf587d0f93..335e68e2e8d92 100644
--- a/clang/test/CodeGenHLSL/resources/cbuffer.hlsl
+++ b/clang/test/CodeGenHLSL/resources/cbuffer.hlsl
@@ -153,13 +153,13 @@ cbuffer CBVectors {
 }
 
 // CHECK: @CBVectors.cb = global target("dx.CBuffer", %__cblayout_CBVectors)
-// CHECK: @b1 = external hidden addrspace(2) global <3 x float>, align 16
-// CHECK: @b2 = external hidden addrspace(2) global <3 x double>, align 32
-// CHECK: @b3 = external hidden addrspace(2) global <2 x half>, align 4
-// CHECK: @b4 = external hidden addrspace(2) global <3 x i64>, align 32
-// CHECK: @b5 = external hidden addrspace(2) global <4 x i32>, align 16
-// CHECK: @b6 = external hidden addrspace(2) global <3 x i16>, align 8
-// CHECK: @b7 = external hidden addrspace(2) global <3 x i64>, align 32
+// CHECK: @b1 = external hidden addrspace(2) global <3 x float>, align 4
+// CHECK: @b2 = external hidden addrspace(2) global <3 x double>, align 8
+// CHECK: @b3 = external hidden addrspace(2) global <2 x half>, align 2
+// CHECK: @b4 = external hidden addrspace(2) global <3 x i64>, align 8
+// CHECK: @b5 = external hidden addrspace(2) global <4 x i32>, align 4
+// CHECK: @b6 = external hidden addrspace(2) global <3 x i16>, align 2
+// CHECK: @b7 = external hidden addrspace(2) global <3 x i64>, align 8
 // CHECK: @CBVectors.str = private unnamed_addr constant [10 x i8] c"CBVectors\00", align 1
 
 cbuffer CBArrays : register(b2) {
@@ -175,10 +175,10 @@ cbuffer CBArrays : register(b2) {
 
 // CHECK: @CBArrays.cb = global target("dx.CBuffer", %__cblayout_CBArrays)
 // CHECK: @c1 = external hidden addrspace(2) global <{ [2 x <{ float, target("dx.Padding", 12) }>], float }>, align 4
-// CHECK: @c2 = external hidden addrspace(2) global <{ [1 x <{ <3 x double>, target("dx.Padding", 8) }>], <3 x double> }>, align 32
+// CHECK: @c2 = external hidden addrspace(2) global <{ [1 x <{ <3 x double>, target("dx.Padding", 8) }>], <3 x double> }>, align 8
 // CHECK: @c3 = external hidden addrspace(2) global <{ [1 x <{ <{ [1 x <{ half, target("dx.Padding", 14) }>], half }>, target("dx.Padding", 14) }>], <{ [1 x <{ half, target("dx.Padding", 14) }>], half }> }>, align 2
 // CHECK: @c4 = external hidden addrspace(2) global <{ [2 x <{ i64, target("dx.Padding", 8) }>], i64 }>, align 8
-// CHECK: @c5 = external hidden addrspace(2) global [2 x [3 x [4 x <4 x i32>]]], align 16
+// CHECK: @c5 = external hidden addrspace(2) global [2 x [3 x [4 x <4 x i32>]]], align 4
 // CHECK: @c6 = external hidden addrspace(2) global [1 x i16], align 2
 // CHECK: @c7 = external hidden addrspace(2) global <{ [1 x <{ i64, target("dx.Padding", 8) }>], i64 }>, align 8
 // CHECK: @c8 = external hidden addrspace(2) global <{ [3 x <{ i32, target("dx.Padding", 12) }>], i32 }>, align 4
@@ -194,8 +194,8 @@ cbuffer CBTypedefArray : register(space2) {
 }
 
 // CHECK: @CBTypedefArray.cb = global target("dx.CBuffer", %__cblayout_CBTypedefArray)
-// CHECK: @t1 = external hidden addrspace(2) global [2 x [2 x <4 x i32>]], align 16
-// CHECK: @t2 = external hidden addrspace(2) global [2 x [2 x <4 x i32>]], align 16
+// CHECK: @t1 = external hidden addrspace(2) global [2 x [2 x <4 x i32>]], align 4
+// CHECK: @t2 = external hidden addrspace(2) global [2 x [2 x <4 x i32>]], align 4
 // CHECK: @CBTypedefArray.str = private unnamed_addr constant [15 x i8] c"CBTypedefArray\00", align 1
 struct Empty {};
 
@@ -224,7 +224,7 @@ struct D {
 // CHECK: @array_of_A = external hidden addrspace(2) global <{ [4 x <{ %A, target("dx.Padding", 8) }>], %A }>, align 1
 // CHECK: @d = external hidden addrspace(2) global %__cblayout_D, align 1
 // CHECK: @e = external hidden addrspace(2) global half, align 2
-// CHECK: @f = external hidden addrspace(2) global <3 x i16>, align 8
+// CHECK: @f = external hidden addrspace(2) global <3 x i16>, align 2
 // CHECK: @CBStructs.str = private unnamed_addr constant [10 x i8] c"CBStructs\00", align 1
 
 cbuffer CBStructs {
@@ -271,7 +271,7 @@ struct Test {
 // CHECK: @CBMix.cb = global target("dx.CBuffer", %__cblayout_CBMix)
 // CHECK: @test = external hidden addrspace(2) global <{ [1 x <{ %Test, target("dx.Padding", 8) }>], %Test }>, align 1
 // CHECK: @f1 = external hidden addrspace(2) global float, align 4
-// CHECK: @f2 = external hidden addrspace(2) global <{ [2 x <{ <{ [1 x <{ <2 x float>, target("dx.Padding", 8) }>], <2 x float> }>, target("dx.Padding", 8) }>], <{ [1 x <{ <2 x float>, target("dx.Padding", 8) }>], <2 x float> }> }>, align 8
+// CHECK: @f2 = external hidden addrspace(2) global <{ [2 x <{ <{ [1 x <{ <2 x float>, target("dx.Padding", 8) }>], <2 x float> }>, target("dx.Padding", 8) }>], <{ [1 x <{ <2 x float>, target("dx.Padding", 8) }>], <2 x float> }> }>, align 4
 // CHECK: @f3 = external hidden addrspace(2) global float, align 4
 // CHECK: @f4 = external hidden addrspace(2) global %anon, align 1
 // CHECK: @f5 = external hidden addrspace(2) global double, align 8
diff --git a/clang/test/CodeGenHLSL/resources/cbuffer_geps.hlsl b/clang/test/CodeGenHLSL/resources/cbuffer_geps.hlsl
index 6fb0fed32d42d..6ad53e15c743f 100644
--- a/clang/test/CodeGenHLSL/resources/cbuffer_geps.hlsl
+++ b/clang/test/CodeGenHLSL/resources/cbuffer_geps.hlsl
@@ -32,7 +32,7 @@ void cbarrays() {
   use(c3[9][5]);
   // CHECK: load i64, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @c4, i32 96), align 16
   use(c4[6]);
-  // CHECK:  load <4 x i32>, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @c5, i32 27120), align 16
+  // CHECK:  load <4 x i32>, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @c5, i32 27120), align 4
   use(c5[1][12][15]);
   // CHECK: load i16, ptr addrspace(2) getelementptr inbounds nuw (i8, ptr addrspace(2) @c6, i32 64), align 16
   use(c6[4]);
diff --git a/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl b/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl
index 68e263b9fc07f..48463e8b1c0e6 100644
--- a/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl
+++ b/clang/test/CodeGenHLSL/resources/cbuffer_with_packoffset.hlsl
@@ -19,7 +19,7 @@
 // CHECK-DAG: @CB.cb = global target("dx.CBuffer", %__cblayout_CB)
 // CHECK-DAG: @a = external hidden addrspace(2) global float, align 4
 // CHECK-DAG: @b = external hidden addrspace(2) global double, align 8
-// CHECK-DAG: @c = external hidden addrspace(2) global <2 x i32>, align 8
+// CHECK-DAG: @c = external hidden addrspace(2) global <2 x i32>, align 4
 // CHECK: @CB.str = private unnamed_addr constant [3 x i8] c"CB\00", align 1
 
 cbuffer CB : register(b1, space3) {
@@ -30,7 +30,7 @@ cbuffer CB : register(b1, space3) {
 
 // CHECK-DAG: @CB.cb.1 = global target("dx.CBuffer", %__cblayout_CB_1)
 // CHECK-DAG: @x = external hidden addrspace(2) global float, align 4
-// CHECK-DAG: @y = external hidden addrspace(2) global <2 x float>, align 8
+// CHECK-DAG: @y = external hidden addrspace(2) global <2 x float>, align 4
 
 // Missing packoffset annotation will produce a warning.
 // Element x will be placed after the element y that has an explicit packoffset.
diff --git a/clang/test/CodeGenHLSL/resources/default_cbuffer_with_layout.hlsl b/clang/test/CodeGenHLSL/resources/default_cbuffer_with_layout.hlsl
index 63960f817de8f..f3d85e57983b2 100644
--- a/clang/test/CodeGenHLSL/resources/default_cbuffer_with_layout.hlsl
+++ b/clang/test/CodeGenHLSL/resources/default_cbuffer_with_layout.hlsl
@@ -21,8 +21,8 @@
 // CHECK-DAG: @a = external hidden addrspace(2) global i32, align 4
 // CHECK-DAG: @b = external hidden addrspace(2) global float, align 4
 // CHECK-DAG: @c = external hidden addrspace(2) global <{ [3 x <{ double, target("dx.Padding", 8) }>], double }>, align 8
-// CHECK-DAG: @d = external hidden addrspace(2) global <4 x i32>, align 16
-// CHECK-DAG: @e = external hidden addrspace(2) global <4 x float>, align 16
+// CHECK-DAG: @d = external hidden addrspace(2) global <4 x i32>, align 4
+// CHECK-DAG: @e = external hidden addrspace(2) global <4 x float>, align 4
 // CHECK-DAG: @s = external hidden addrspace(2) global %S, align 1
 // CHECK-DAG: @m = external hidden addrspace(2) global <{ [2 x <{ <2 x float>, target("dx.Padding", 8) }>], <2 x float> }>, align 4
 // CHECK-DAG: @n = external hidden addrspace(2) global [3 x <4 x float>], align 4
diff --git a/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl b/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl
index f17cf12945e4a..3800a09382d2b 100644
--- a/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl
+++ b/clang/test/CodeGenHLSL/resources/res-array-global-dyn-index.hlsl
@@ -10,13 +10,13 @@ RWStructuredBuffer<float> Out;
 // with range 12 and dynamically calculated index
 
 // CHECK: define internal void @main(unsigned int vector[3])(<3 x i32> noundef %GI)
-// CHECK: %[[GI_alloca:.*]] = alloca <3 x i32>, align 16
+// CHECK: %[[GI_alloca:.*]] = alloca <3 x i32>, align 4
 // CHECK: %[[Tmp0:.*]] = alloca %"class.hlsl::RWBuffer
 // CHECK: store <3 x i32> %GI, ptr %[[GI_alloca]]
 
-// CHECK: %[[GI:.*]] = load <3 x i32>, ptr %[[GI_alloca]], align 16
+// CHECK: %[[GI:.*]] = load <3 x i32>, ptr %[[GI_alloca]], align 4
 // CHECK: %[[GI_y:.*]] = extractelement <3 x i32> %[[GI]], i32 1
-// CHECK: %[[GI:.*]] = load <3 x i32>, ptr %[[GI_alloca]], align 16
+// CHECK: %[[GI:.*]] = load <3 x i32>, ptr %[[GI_alloca]], align 4
 // CHECK: %[[GI_x:.*]] = extractelement <3 x i32> %[[GI]], i32 0
 // CHECK: %[[Tmp1:.*]] = mul i32 %[[GI_x]], 3
 // CHECK: %[[Index:.*]] = add i32 %[[GI_y]], %[[Tmp1]]

>From 32fd1378bca0064d830f9fd2dd77d9ed335e1b70 Mon Sep 17 00:00:00 2001
From: Justin Bogner <mail at justinbogner.com>
Date: Wed, 11 Mar 2026 13:41:18 -0700
Subject: [PATCH 2/2] fixup: check element-aligned consistency

---
 clang/lib/CodeGen/CodeGenModule.cpp | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 3b64be7a477d6..679e69be870d9 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -340,7 +340,7 @@ static void checkDataLayoutConsistency(const TargetInfo &Target,
                                        const LangOptions &Opts) {
 #ifndef NDEBUG
   // Don't verify non-standard ABI configurations.
-  if (Opts.AlignDouble || Opts.OpenCL || Opts.HLSL)
+  if (Opts.AlignDouble || Opts.OpenCL)
     return;
 
   llvm::Triple Triple = Target.getTriple();
@@ -393,6 +393,15 @@ static void checkDataLayoutConsistency(const TargetInfo &Target,
     Check("__ibm128", llvm::Type::getPPC_FP128Ty(Context), Target.Ibm128Align);
 
   Check("void*", llvm::PointerType::getUnqual(Context), Target.PointerAlign);
+
+  if (Target.vectorsAreElementAligned() != DL.vectorsAreElementAligned()) {
+    llvm::errs() << "Datalayout for target " << Triple.str()
+                 << " sets element-aligned vectors to '"
+                 << DL.vectorsAreElementAligned()
+                 << "' but clang specifies '"
+                 << Target.vectorsAreElementAligned() << "'\n";
+    abort();
+  }
 #endif
 }
 



More information about the llvm-branch-commits mailing list