[clang] bdd55b2 - Fix the default alignment of i1 vectors.

Eli Friedman via cfe-commits cfe-commits at lists.llvm.org
Sat Jul 31 14:11:08 PDT 2021


Author: Eli Friedman
Date: 2021-07-31T14:09:59-07:00
New Revision: bdd55b2f1810eb5a2474a36229d08a9e5ca870fc

URL: https://github.com/llvm/llvm-project/commit/bdd55b2f1810eb5a2474a36229d08a9e5ca870fc
DIFF: https://github.com/llvm/llvm-project/commit/bdd55b2f1810eb5a2474a36229d08a9e5ca870fc.diff

LOG: Fix the default alignment of i1 vectors.

Currently, the default alignment of i1 vectors is much larger than the actual
size of the vector in memory. Fix this to use a sane default: the vector's
store size, rounded up to the next power of two.
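
To make the effect concrete, here is a minimal standalone C++ sketch (not the
LLVM API; it only mirrors the arithmetic in DataLayout::getAlignment, treating
vscale as 1 for scalable types). The old computation scaled the element's
alloc size by the element count, so every i1 lane cost a full byte; the new
one rounds the vector's store size up to a power of two:

  #include <cstdint>
  #include <cstdio>

  static uint64_t powerOf2Ceil(uint64_t v) {
    uint64_t p = 1;
    while (p < v)
      p <<= 1;
    return p;
  }

  // Old behaviour: natural alignment from element alloc size * element count,
  // so each i1 lane contributes a whole byte.
  static uint64_t oldAlign(uint64_t eltAllocBytes, uint64_t minElts) {
    return powerOf2Ceil(eltAllocBytes * minElts);
  }

  // New behaviour: natural alignment from the vector's store size in bytes,
  // which packs eight i1 lanes per byte.
  static uint64_t newAlign(uint64_t eltBits, uint64_t minElts) {
    uint64_t storeBytes = (eltBits * minElts + 7) / 8;
    return powerOf2Ceil(storeBytes);
  }

  int main() {
    // <vscale x 16 x i1>: alignment drops from 16 to 2.
    std::printf("nxv16i1: old %llu, new %llu\n",
                (unsigned long long)oldAlign(1, 16),
                (unsigned long long)newAlign(1, 16));
    // <8 x i1>: alignment drops from 8 to 1.
    std::printf("v8i1:    old %llu, new %llu\n",
                (unsigned long long)oldAlign(1, 8),
                (unsigned long long)newAlign(1, 8));
    return 0;
  }

These numbers line up with the updated CHECK lines below, e.g. the
<vscale x 16 x i1> allocas dropping to align 2 in the 128-bit SVE
configuration.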

For SVE, temporarily remove lowering of load/store operations for
predicates with fewer than 16 elements. The layout the backend was
assuming for those predicates doesn't agree with the frontend's. More
work probably needs to be done here.
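
As a rough illustration of that disagreement (a sketch, assuming a full SVE
predicate spill covers vscale x 2 bytes, i.e. one predicate bit per byte of
the vector, and treating vscale as 1): the natural in-memory size the frontend
now assumes for <vscale x N x i1> is only ceil(N/8) bytes per vscale, so a
full predicate store would write past the object for N < 16:

  #include <cstdio>

  int main() {
    const unsigned predRegBytes = 2;   // assumed: vscale x 16 predicate bits
    const unsigned counts[] = {2, 4, 8, 16};
    for (unsigned n : counts) {
      // New natural store size of <vscale x n x i1>, per unit of vscale.
      unsigned memBytes = (n + 7) / 8;
      std::printf("nxv%ui1: memory %u byte(s), predicate store %u bytes%s\n",
                  n, memBytes, predRegBytes,
                  memBytes < predRegBytes ? "  <-- mismatch" : "");
    }
    return 0;
  }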

This change is, strictly speaking, not backwards-compatible at the
bitcode level. But probably nobody is actually depending on that; i1
vectors in memory are rare, and the code that does use them probably
ends up forcing the alignment to something sane anyway.  If we think
this is a concern, I can restrict this to scalable vectors for now
(where it's actually causing issues for me at the moment).

Differential Revision: https://reviews.llvm.org/D88994

Added: 
    

Modified: 
    clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
    clang/test/CodeGen/builtins-ppc-pair-mma.c
    llvm/lib/IR/DataLayout.cpp
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
    llvm/test/CodeGen/AArch64/spillfill-sve.ll
    llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
    llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
    llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
    llvm/test/CodeGen/NVPTX/param-load-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
    llvm/test/CodeGen/Thumb2/mve-masked-load.ll
    llvm/test/CodeGen/Thumb2/mve-masked-store.ll
    llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
    llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
    llvm/test/CodeGen/X86/avx512-mask-op.ll
    llvm/test/CodeGen/X86/avx512-select.ll
    llvm/test/CodeGen/X86/bitcast-vector-bool.ll
    llvm/test/CodeGen/X86/load-local-v3i129.ll
    llvm/test/CodeGen/X86/pr41619.ll
    llvm/test/CodeGen/X86/vector-sext.ll
    llvm/test/Transforms/InstCombine/abs-intrinsic.ll
    llvm/test/Transforms/InstCombine/icmp-vec.ll
    llvm/test/Transforms/InstCombine/select-min-max.ll
    llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
    llvm/test/Transforms/SROA/vector-promotion-different-size.ll
    llvm/test/Transforms/VectorCombine/load-insert-store.ll

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
index 278cc930610bd..57ff21cb86d8f 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
@@ -191,32 +191,32 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
 
 // CHECK-128-LABEL: @read_bool(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 16
+// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 2
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT:    store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT:    store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <2 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bool(
 // CHECK-256-NEXT:  entry:
-// CHECK-256-NEXT:    [[SAVED_VALUE:%.*]] = alloca <4 x i8>, align 16
+// CHECK-256-NEXT:    [[SAVED_VALUE:%.*]] = alloca <4 x i8>, align 4
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT:    [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-256-NEXT:    store <4 x i8> [[TMP0]], <4 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT:    store <4 x i8> [[TMP0]], <4 x i8>* [[SAVED_VALUE]], align 4, !tbaa [[TBAA6]]
 // CHECK-256-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <4 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 4, !tbaa [[TBAA6]]
 // CHECK-256-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bool(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_bool(struct struct_bool *s) {
@@ -225,30 +225,30 @@ svbool_t read_bool(struct struct_bool *s) {
 
 // CHECK-128-LABEL: @write_bool(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-128-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <2 x i8>*
-// CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT:    store <2 x i8> [[TMP0]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-256-LABEL: @write_bool(
 // CHECK-256-NEXT:  entry:
-// CHECK-256-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-256-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-256-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 4
+// CHECK-256-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 4, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-256-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <4 x i8>*
-// CHECK-256-NEXT:    [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT:    [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[CASTFIXEDSVE]], align 4, !tbaa [[TBAA6]]
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT:    store <4 x i8> [[TMP0]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-256-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_bool(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-512-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
-// CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    ret void

diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
index 136647cada3ad..4f510d35ad127 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -77,31 +77,31 @@ fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_
 
 // CHECK-LABEL: @call_bool_ff(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[OP2:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[SAVED_VALUE3:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[SAVED_VALUE5:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[OP2:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[SAVED_VALUE3:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[SAVED_VALUE5:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 8
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
-// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 8
+// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[SAVED_VALUE3]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[SAVED_VALUE3]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTFIXEDSVE4:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE3]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE4]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE4]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1>* [[SAVED_VALUE5]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1>* [[SAVED_VALUE5]], align 8, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-NEXT:    [[CASTFIXEDSVE6:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE5]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE6]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE6]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT:    store <8 x i8> [[TMP5]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT:    store <8 x i8> [[TMP5]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP6]]
 //
 fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
@@ -134,23 +134,23 @@ fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op
 
 // CHECK-LABEL: @call_bool_fs(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[SAVED_VALUE2:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[SAVED_VALUE2:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 8
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP1]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1>* [[SAVED_VALUE2]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1>* [[SAVED_VALUE2]], align 8, !tbaa [[TBAA9]]
 // CHECK-NEXT:    [[CASTFIXEDSVE3:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE2]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE3]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE3]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT:    store <8 x i8> [[TMP3]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT:    store <8 x i8> [[TMP3]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP4]]
 //
 fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
@@ -183,15 +183,15 @@ fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
 
 // CHECK-LABEL: @call_bool_ss(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
 //
 fixed_bool_t call_bool_ss(svbool_t pg, svbool_t op1, svbool_t op2) {

diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
index 6dc87aa37af8d..04c85a53a42e8 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -45,15 +45,15 @@ fixed_float64_t from_svfloat64_t(svfloat64_t type) {
 
 // CHECK-LABEL: @to_svbool_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
+// CHECK-NEXT:    [[TYPE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 8
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 8, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
   return type;
@@ -61,15 +61,15 @@ svbool_t to_svbool_t(fixed_bool_t type) {
 
 // CHECK-LABEL: @from_svbool_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
+// CHECK-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
+// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 fixed_bool_t from_svbool_t(svbool_t type) {
   return type;

diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
index c27651f55a188..457acac6199a9 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
@@ -92,7 +92,7 @@ fixed_int32_t array_arg(fixed_int32_t arr[]) {
 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i8>, align 2
 // CHECK-NEXT:    [[ARR:%.*]] = alloca [3 x <8 x i8>], align 2
 // CHECK-NEXT:    [[PARR:%.*]] = alloca <8 x i8>*, align 8
-// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], [3 x <8 x i8>]* [[ARR]], i64 0, i64 0
 // CHECK-NEXT:    store <8 x i8>* [[ARRAYIDX]], <8 x i8>** [[PARR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>*, <8 x i8>** [[PARR]], align 8
@@ -100,8 +100,8 @@ fixed_int32_t array_arg(fixed_int32_t arr[]) {
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i8*
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i8>* [[RETVAL]] to i8*
-// CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP2]], i8* align 2 [[TMP3]], i64 8, i1 false)
-// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[TMP2]], i8* align 2 [[TMP3]], i64 8, i1 false)
+// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 2
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP4]]
 //
 fixed_bool_t address_of_array_idx() {

diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
index 0171c763ef512..ba6e60f43f449 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
@@ -49,19 +49,19 @@ void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
 
 // CHECK-128-LABEL: @write_global_bool(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-128-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <2 x i8>*
-// CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    store <2 x i8> [[TMP0]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_global_bool(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-512-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
-// CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    ret void
 //
@@ -101,20 +101,20 @@ svbfloat16_t read_global_bf16() { return global_bf16; }
 
 // CHECK-128-LABEL: @read_global_bool(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 16
+// CHECK-128-NEXT:    [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 2
 // CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT:    store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT:    store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <2 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_global_bool(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-512-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
 // CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT:    store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_global_bool() { return global_bool; }

diff  --git a/clang/test/CodeGen/builtins-ppc-pair-mma.c b/clang/test/CodeGen/builtins-ppc-pair-mma.c
index 472da0393b516..0bbf91090e52b 100644
--- a/clang/test/CodeGen/builtins-ppc-pair-mma.c
+++ b/clang/test/CodeGen/builtins-ppc-pair-mma.c
@@ -1112,10 +1112,10 @@ void test70(const __vector_pair *vpp, const __vector_pair *vp2) {
 
 // CHECK-LABEL: @test71(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 128
+// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 1024
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i1>* [[TMP0]] to i8*
 // CHECK-NEXT:    [[TMP2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 128
+// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 1024
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i1>* [[TMP3]] to i8*
 // CHECK-NEXT:    tail call void @llvm.ppc.vsx.stxvp(<256 x i1> [[TMP2]], i8* [[TMP4]])
 // CHECK-NEXT:    ret void
@@ -1304,10 +1304,10 @@ void test82(const __vector_pair *vpp, const __vector_pair *vp2) {
 
 // CHECK-LABEL: @test83(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 128
+// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 1024
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i1>* [[TMP0]] to i8*
 // CHECK-NEXT:    [[TMP2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 128
+// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 1024
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i1>* [[TMP3]] to i8*
 // CHECK-NEXT:    tail call void @llvm.ppc.vsx.stxvp(<256 x i1> [[TMP2]], i8* [[TMP4]])
 // CHECK-NEXT:    ret void

diff  --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index ecd74449dc380..d65340c2fd9c7 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -800,15 +800,11 @@ Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
 
     // By default, use natural alignment for vector types. This is consistent
     // with what clang and llvm-gcc do.
-    // TODO: This should probably not be using the alloc size.
-    unsigned Alignment =
-        getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+    //
     // We're only calculating a natural alignment, so it doesn't have to be
     // based on the full size for scalable vectors. Using the minimum element
     // count should be enough here.
-    Alignment *= cast<VectorType>(Ty)->getElementCount().getKnownMinValue();
-    Alignment = PowerOf2Ceil(Alignment);
-    return Align(Alignment);
+    return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinSize()));
   }
   case Type::X86_AMXTyID:
     return Align(64);

diff  --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 93b02b2d692ec..96ad0717d4836 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2120,9 +2120,6 @@ let Predicates = [HasSVEorStreamingSVE] in {
   }
 
   defm Pat_Store_P16 : unpred_store_predicate<nxv16i1, STR_PXI>;
-  defm Pat_Store_P8  : unpred_store_predicate<nxv8i1, STR_PXI>;
-  defm Pat_Store_P4  : unpred_store_predicate<nxv4i1, STR_PXI>;
-  defm Pat_Store_P2  : unpred_store_predicate<nxv2i1, STR_PXI>;
 
   multiclass unpred_load_predicate<ValueType Ty, Instruction Load> {
     def _fi : Pat<(Ty (load (am_sve_fi GPR64sp:$base, simm9:$offset))),
@@ -2133,9 +2130,6 @@ let Predicates = [HasSVEorStreamingSVE] in {
   }
 
   defm Pat_Load_P16 : unpred_load_predicate<nxv16i1, LDR_PXI>;
-  defm Pat_Load_P8  : unpred_load_predicate<nxv8i1, LDR_PXI>;
-  defm Pat_Load_P4  : unpred_load_predicate<nxv4i1, LDR_PXI>;
-  defm Pat_Load_P2  : unpred_load_predicate<nxv2i1, LDR_PXI>;
 
   multiclass ld1<Instruction RegRegInst, Instruction RegImmInst, ValueType Ty,
                  SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
index c83d3b34298b7..28bca50a7de86 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
@@ -7,7 +7,7 @@ define <4 x i1> @ret_v4i1(<4 x i1> *%v) {
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s1>) = G_LOAD [[COPY]](p0) :: (load (<4 x s1>) from %ir.v, align 4)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s1>) = G_LOAD [[COPY]](p0) :: (load (<4 x s1>) from %ir.v)
   ; CHECK:   [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[LOAD]](<4 x s1>)
   ; CHECK:   $d0 = COPY [[ANYEXT]](<4 x s16>)
   ; CHECK:   RET_ReallyLR implicit $d0

diff  --git a/llvm/test/CodeGen/AArch64/spillfill-sve.ll b/llvm/test/CodeGen/AArch64/spillfill-sve.ll
index a703c510952f6..135d6f644fc3d 100644
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.ll
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.ll
@@ -404,8 +404,8 @@ define void @spill_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1)
 
 define void @fill_nxv16i1() {
 ; CHECK-LABEL: fill_nxv16i1
-; CHECK-DAG: ldr    p{{[01]}}, [sp, #8, mul vl]
-; CHECK-DAG: ldr    p{{[01]}}, [sp]
+; CHECK-DAG: ldr    p{{[01]}}, [sp, #7, mul vl]
+; CHECK-DAG: ldr    p{{[01]}}, [sp, #6, mul vl]
   %local0 = alloca <vscale x 16 x i1>
   %local1 = alloca <vscale x 16 x i1>
   load volatile <vscale x 16 x i1>, <vscale x 16 x i1>* %local0
@@ -413,81 +413,15 @@ define void @fill_nxv16i1() {
   ret void
 }
 
-define void @fill_nxv8i1() {
-; CHECK-LABEL: fill_nxv8i1
-; CHECK-DAG: ldr    p{{[01]}}, [sp, #4, mul vl]
-; CHECK-DAG: ldr    p{{[01]}}, [sp]
-  %local0 = alloca <vscale x 8 x i1>
-  %local1 = alloca <vscale x 8 x i1>
-  load volatile <vscale x 8 x i1>, <vscale x 8 x i1>* %local0
-  load volatile <vscale x 8 x i1>, <vscale x 8 x i1>* %local1
-  ret void
-}
-
-define void @fill_nxv4i1() {
-; CHECK-LABEL: fill_nxv4i1
-; CHECK-DAG: ldr    p{{[01]}}, [sp, #6, mul vl]
-; CHECK-DAG: ldr    p{{[01]}}, [sp, #4, mul vl]
-  %local0 = alloca <vscale x 4 x i1>
-  %local1 = alloca <vscale x 4 x i1>
-  load volatile <vscale x 4 x i1>, <vscale x 4 x i1>* %local0
-  load volatile <vscale x 4 x i1>, <vscale x 4 x i1>* %local1
-  ret void
-}
-
-define void @fill_nxv2i1() {
-; CHECK-LABEL: fill_nxv2i1
-; CHECK-DAG: ldr    p{{[01]}}, [sp, #7, mul vl]
-; CHECK-DAG: ldr    p{{[01]}}, [sp, #6, mul vl]
-  %local0 = alloca <vscale x 2 x i1>
-  %local1 = alloca <vscale x 2 x i1>
-  load volatile <vscale x 2 x i1>, <vscale x 2 x i1>* %local0
-  load volatile <vscale x 2 x i1>, <vscale x 2 x i1>* %local1
-  ret void
-}
-
 ; Predicate spills
 
 define void @spill_nxv16i1(<vscale x 16 x i1> %v0, <vscale x 16 x i1> %v1) {
 ; CHECK-LABEL: spill_nxv16i1
-; CHECK-DAG: str    p{{[01]}}, [sp, #8, mul vl]
-; CHECK-DAG: str    p{{[01]}}, [sp]
+; CHECK-DAG: str    p{{[01]}}, [sp, #7, mul vl]
+; CHECK-DAG: str    p{{[01]}}, [sp, #6, mul vl]
   %local0 = alloca <vscale x 16 x i1>
   %local1 = alloca <vscale x 16 x i1>
   store volatile <vscale x 16 x i1> %v0, <vscale x 16 x i1>* %local0
   store volatile <vscale x 16 x i1> %v1, <vscale x 16 x i1>* %local1
   ret void
 }
-
-define void @spill_nxv8i1(<vscale x 8 x i1> %v0, <vscale x 8 x i1> %v1) {
-; CHECK-LABEL: spill_nxv8i1
-; CHECK-DAG: str    p{{[01]}}, [sp, #4, mul vl]
-; CHECK-DAG: str    p{{[01]}}, [sp]
-  %local0 = alloca <vscale x 8 x i1>
-  %local1 = alloca <vscale x 8 x i1>
-  store volatile <vscale x 8 x i1> %v0, <vscale x 8 x i1>* %local0
-  store volatile <vscale x 8 x i1> %v1, <vscale x 8 x i1>* %local1
-  ret void
-}
-
-define void @spill_nxv4i1(<vscale x 4 x i1> %v0, <vscale x 4 x i1> %v1) {
-; CHECK-LABEL: spill_nxv4i1
-; CHECK-DAG: str    p{{[01]}}, [sp, #6, mul vl]
-; CHECK-DAG: str    p{{[01]}}, [sp, #4, mul vl]
-  %local0 = alloca <vscale x 4 x i1>
-  %local1 = alloca <vscale x 4 x i1>
-  store volatile <vscale x 4 x i1> %v0, <vscale x 4 x i1>* %local0
-  store volatile <vscale x 4 x i1> %v1, <vscale x 4 x i1>* %local1
-  ret void
-}
-
-define void @spill_nxv2i1(<vscale x 2 x i1> %v0, <vscale x 2 x i1> %v1) {
-; CHECK-LABEL: spill_nxv2i1
-; CHECK-DAG: str    p{{[01]}}, [sp, #7, mul vl]
-; CHECK-DAG: str    p{{[01]}}, [sp, #6, mul vl]
-  %local0 = alloca <vscale x 2 x i1>
-  %local1 = alloca <vscale x 2 x i1>
-  store volatile <vscale x 2 x i1> %v0, <vscale x 2 x i1>* %local0
-  store volatile <vscale x 2 x i1> %v1, <vscale x 2 x i1>* %local1
-  ret void
-}

diff  --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index b8605e7ced927..8cb8b1c92fa7e 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -43,22 +43,22 @@ define aarch64_sve_vector_pcs <vscale x 4 x i32> @caller_with_many_sve_arg(<vsca
 ;      p3 =  %p3
 ;      x0 = &%p4
 ;      x1 = &%p5
-define aarch64_sve_vector_pcs <vscale x 4 x i1> @callee_with_many_svepred_arg(<vscale x 4 x i1> %p0, <vscale x 4 x i1> %p1, <vscale x 4 x i1> %p2, <vscale x 4 x i1> %p3, <vscale x 4 x i1> %p4, <vscale x 4 x i1> %p5) {
+define aarch64_sve_vector_pcs <vscale x 16 x i1> @callee_with_many_svepred_arg(<vscale x 16 x i1> %p0, <vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2, <vscale x 16 x i1> %p3, <vscale x 16 x i1> %p4, <vscale x 16 x i1> %p5) {
 ; CHECK: name: callee_with_many_svepred_arg
 ; CHECK-DAG: [[BASE:%[0-9]+]]:gpr64common = COPY $x1
 ; CHECK-DAG: [[RES:%[0-9]+]]:ppr = LDR_PXI [[BASE]], 0
 ; CHECK-DAG: $p0 = COPY [[RES]]
 ; CHECK:     RET_ReallyLR implicit $p0
-  ret <vscale x 4 x i1> %p5
+  ret <vscale x 16 x i1> %p5
 }
 
 ; Test that p4 and p5 are passed by reference.
-define aarch64_sve_vector_pcs <vscale x 4 x i1> @caller_with_many_svepred_arg(<vscale x 4 x i1> %p) {
+define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_many_svepred_arg(<vscale x 16 x i1> %p) {
 ; CHECK: name: caller_with_many_svepred_arg
 ; CHECK: stack:
-; CHECK:      - { id: 0, name: '', type: default, offset: 0, size: 1, alignment: 4,
+; CHECK:      - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
 ; CHECK-NEXT:     stack-id: scalable-vector
-; CHECK:      - { id: 1, name: '', type: default, offset: 0, size: 1, alignment: 4,
+; CHECK:      - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2,
 ; CHECK-NEXT:     stack-id: scalable-vector
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0
@@ -68,8 +68,8 @@ define aarch64_sve_vector_pcs <vscale x 4 x i1> @caller_with_many_svepred_arg(<v
 ; CHECK-DAG: $x1 = COPY [[BASE2]]
 ; CHECK-NEXT: BL @callee_with_many_svepred_arg
 ; CHECK:     RET_ReallyLR implicit $p0
-  %ret = call aarch64_sve_vector_pcs <vscale x 4 x i1> @callee_with_many_svepred_arg(<vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p)
-  ret <vscale x 4 x i1> %ret
+  %ret = call aarch64_sve_vector_pcs <vscale x 16 x i1> @callee_with_many_svepred_arg(<vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p)
+  ret <vscale x 16 x i1> %ret
 }
 
 ; Test that z8 and z9, passed by reference, are loaded from a location that is passed on the stack.

diff  --git a/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll b/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
index 3d5b61802cf73..0f01b536d6fef 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
@@ -913,33 +913,35 @@ define amdgpu_kernel void @kern_realign_i1_i1_i1_i1(i1 %arg0, i1 %arg1, i1 %arg2
 
 define amdgpu_kernel void @kern_realign_i1_v3i1(i1 %arg0, <3 x i1> %arg1) #0 {
 ; HSA-LABEL: @kern_realign_i1_v3i1(
-; HSA-NEXT:    [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(8) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
+; HSA-NEXT:    [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(4) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
 ; HSA-NEXT:    [[ARG0_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 0
 ; HSA-NEXT:    [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
 ; HSA-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 16, !invariant.load !0
 ; HSA-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i1
-; HSA-NEXT:    [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 4
+; HSA-NEXT:    [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 0
 ; HSA-NEXT:    [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
-; HSA-NEXT:    [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 4, !invariant.load !0
-; HSA-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i3
-; HSA-NEXT:    [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP4]] to <3 x i1>
+; HSA-NEXT:    [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 16, !invariant.load !0
+; HSA-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 8
+; HSA-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i3
+; HSA-NEXT:    [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP5]] to <3 x i1>
 ; HSA-NEXT:    store volatile i1 [[TMP2]], i1 addrspace(1)* undef, align 1
-; HSA-NEXT:    store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 4
+; HSA-NEXT:    store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 1
 ; HSA-NEXT:    ret void
 ;
 ; MESA-LABEL: @kern_realign_i1_v3i1(
-; MESA-NEXT:    [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(44) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
+; MESA-NEXT:    [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(40) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
 ; MESA-NEXT:    [[ARG0_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 36
 ; MESA-NEXT:    [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
 ; MESA-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 4, !invariant.load !0
 ; MESA-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i1
-; MESA-NEXT:    [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 40
+; MESA-NEXT:    [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 36
 ; MESA-NEXT:    [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
-; MESA-NEXT:    [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 8, !invariant.load !0
-; MESA-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i3
-; MESA-NEXT:    [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP4]] to <3 x i1>
+; MESA-NEXT:    [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 4, !invariant.load !0
+; MESA-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 8
+; MESA-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i3
+; MESA-NEXT:    [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP5]] to <3 x i1>
 ; MESA-NEXT:    store volatile i1 [[TMP2]], i1 addrspace(1)* undef, align 1
-; MESA-NEXT:    store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 4
+; MESA-NEXT:    store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 1
 ; MESA-NEXT:    ret void
 ;
   store volatile i1 %arg0, i1 addrspace(1)* undef

diff  --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index 4ca11358c346d..e981d5efe9880 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -479,8 +479,9 @@ define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
 ; CHECK-NOF16-DAG:  setp.neu.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.neu.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8  [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8  [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp une <2 x half> %a, %b
@@ -500,8 +501,9 @@ define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.equ.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.equ.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ueq <2 x half> %a, %b
@@ -521,8 +523,9 @@ define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.gtu.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.gtu.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ugt <2 x half> %a, %b
@@ -542,8 +545,9 @@ define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.geu.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.geu.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp uge <2 x half> %a, %b
@@ -563,8 +567,9 @@ define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.ltu.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.ltu.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ult <2 x half> %a, %b
@@ -584,8 +589,9 @@ define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.leu.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.leu.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ule <2 x half> %a, %b
@@ -606,8 +612,9 @@ define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.nan.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.nan.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp uno <2 x half> %a, %b
@@ -627,8 +634,9 @@ define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.ne.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.ne.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp one <2 x half> %a, %b
@@ -648,8 +656,9 @@ define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.eq.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.eq.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp oeq <2 x half> %a, %b
@@ -669,8 +678,9 @@ define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.gt.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.gt.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ogt <2 x half> %a, %b
@@ -690,8 +700,9 @@ define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.ge.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.ge.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp oge <2 x half> %a, %b
@@ -711,8 +722,9 @@ define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.lt.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.lt.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp olt <2 x half> %a, %b
@@ -732,8 +744,9 @@ define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.le.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.le.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ole <2 x half> %a, %b
@@ -753,8 +766,9 @@ define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
 ; CHECK-NOF16-DAG:  setp.num.f32   [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG:  setp.num.f32   [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG:  selp.u16        [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8     [func_retval0+0], [[R0]];
 ; CHECK-DAG:  selp.u16        [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8  [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8     [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ord <2 x half> %a, %b

diff  --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
index 6f6b8c2eabc05..099a26afb940b 100644
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -66,20 +66,20 @@ define signext i1 @test_i1s(i1 signext %a) {
 }
 
 ; Make sure that i1 loads are vectorized as i8 loads, respecting each element alignment.
-; CHECK: .func  (.param .align 4 .b8 func_retval0[4])
+; CHECK: .func  (.param .align 1 .b8 func_retval0[1])
 ; CHECK-LABEL: test_v3i1(
-; CHECK-NEXT: .param .align 4 .b8 test_v3i1_param_0[4]
+; CHECK-NEXT: .param .align 1 .b8 test_v3i1_param_0[1]
 ; CHECK-DAG:  ld.param.u8     [[E2:%rs[0-9]+]], [test_v3i1_param_0+2];
-; CHECK-DAG:  ld.param.v2.u8  {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i1_param_0]
-; CHECK:      .param .align 4 .b8 param0[4];
-; CHECK-DAG:  st.param.v2.b8  [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG:  ld.param.u8     [[E0:%rs[0-9]+]], [test_v3i1_param_0]
+; CHECK:      .param .align 1 .b8 param0[1];
+; CHECK-DAG:  st.param.b8     [param0+0], [[E0]];
 ; CHECK-DAG:  st.param.b8     [param0+2], [[E2]];
-; CHECK:      .param .align 4 .b8 retval0[4];
+; CHECK:      .param .align 1 .b8 retval0[1];
 ; CHECK:      call.uni (retval0),
 ; CHECK-NEXT: test_v3i1,
-; CHECK-DAG:  ld.param.v2.b8  {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG:  ld.param.b8     [[RE0:%rs[0-9]+]], [retval0+0];
 ; CHECK-DAG:  ld.param.b8     [[RE2:%rs[0-9]+]], [retval0+2];
-; CHECK-DAG:  st.param.v2.b8  [func_retval0+0], {[[RE0]], [[RE1]]}
+; CHECK-DAG:  st.param.b8     [func_retval0+0], [[RE0]]
 ; CHECK-DAG:  st.param.b8     [func_retval0+2], [[RE2]];
 ; CHECK-NEXT: ret;
 define <3 x i1> @test_v3i1(<3 x i1> %a) {
@@ -87,37 +87,43 @@ define <3 x i1> @test_v3i1(<3 x i1> %a) {
        ret <3 x i1> %r;
 }
 
-; CHECK: .func  (.param .align 4 .b8 func_retval0[4])
+; CHECK: .func  (.param .align 1 .b8 func_retval0[1])
 ; CHECK-LABEL: test_v4i1(
-; CHECK-NEXT: .param .align 4 .b8 test_v4i1_param_0[4]
-; CHECK:      ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i1_param_0]
-; CHECK:      .param .align 4 .b8 param0[4];
-; CHECK:      st.param.v4.b8  [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK:      .param .align 4 .b8 retval0[4];
+; CHECK-NEXT: .param .align 1 .b8 test_v4i1_param_0[1]
+; CHECK:      ld.param.u8 [[E0:%rs[0-9]+]], [test_v4i1_param_0]
+; CHECK:      .param .align 1 .b8 param0[1];
+; CHECK:      st.param.b8  [param0+0], [[E0]];
+; CHECK:      .param .align 1 .b8 retval0[1];
 ; CHECK:      call.uni (retval0),
 ; CHECK:      test_v4i1,
-; CHECK:      ld.param.v4.b8  {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK:      st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]};
+; CHECK:      ld.param.b8  [[RE0:%rs[0-9]+]], [retval0+0];
+; CHECK:      ld.param.b8  [[RE1:%rs[0-9]+]], [retval0+1];
+; CHECK:      ld.param.b8  [[RE2:%rs[0-9]+]], [retval0+2];
+; CHECK:      ld.param.b8  [[RE3:%rs[0-9]+]], [retval0+3];
+; CHECK:      st.param.b8  [func_retval0+0], [[RE0]];
+; CHECK:      st.param.b8  [func_retval0+1], [[RE1]];
+; CHECK:      st.param.b8  [func_retval0+2], [[RE2]];
+; CHECK:      st.param.b8  [func_retval0+3], [[RE3]];
 ; CHECK-NEXT: ret;
 define <4 x i1> @test_v4i1(<4 x i1> %a) {
        %r = tail call <4 x i1> @test_v4i1(<4 x i1> %a);
        ret <4 x i1> %r;
 }
 
-; CHECK: .func  (.param .align 8 .b8 func_retval0[8])
+; CHECK: .func  (.param .align 1 .b8 func_retval0[1])
 ; CHECK-LABEL: test_v5i1(
-; CHECK-NEXT: .param .align 8 .b8 test_v5i1_param_0[8]
+; CHECK-NEXT: .param .align 1 .b8 test_v5i1_param_0[1]
 ; CHECK-DAG:  ld.param.u8     [[E4:%rs[0-9]+]], [test_v5i1_param_0+4];
-; CHECK-DAG:  ld.param.v4.u8  {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i1_param_0]
-; CHECK:      .param .align 8 .b8 param0[8];
-; CHECK-DAG:  st.param.v4.b8  [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG:  ld.param.u8     [[E0:%rs[0-9]+]], [test_v5i1_param_0]
+; CHECK:      .param .align 1 .b8 param0[1];
+; CHECK-DAG:  st.param.b8     [param0+0], [[E0]];
 ; CHECK-DAG:  st.param.b8     [param0+4], [[E4]];
-; CHECK:      .param .align 8 .b8 retval0[8];
+; CHECK:      .param .align 1 .b8 retval0[1];
 ; CHECK:      call.uni (retval0),
 ; CHECK-NEXT: test_v5i1,
-; CHECK-DAG:  ld.param.v4.b8  {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG:  ld.param.b8  [[RE0:%rs[0-9]+]], [retval0+0];
 ; CHECK-DAG:  ld.param.b8     [[RE4:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG:  st.param.v4.b8  [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG:  st.param.b8  [func_retval0+0], [[RE0]]
 ; CHECK-DAG:  st.param.b8     [func_retval0+4], [[RE4]];
 ; CHECK-NEXT: ret;
 define <5 x i1> @test_v5i1(<5 x i1> %a) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 1dfcf8dbce951..c0dc7fb97a98c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -84,9 +84,9 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV32-NEXT:    vslideup.vi v26, v25, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v25, v26, 0
-; RV32-NEXT:    addi a0, sp, 14
+; RV32-NEXT:    addi a0, sp, 15
 ; RV32-NEXT:    vse1.v v25, (a0)
-; RV32-NEXT:    lbu a0, 14(sp)
+; RV32-NEXT:    lbu a0, 15(sp)
 ; RV32-NEXT:    andi a1, a0, 1
 ; RV32-NEXT:    beqz a1, .LBB4_2
 ; RV32-NEXT:  # %bb.1: # %cond.load
@@ -131,9 +131,9 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV64-NEXT:    vslideup.vi v26, v25, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v25, v26, 0
-; RV64-NEXT:    addi a0, sp, 14
+; RV64-NEXT:    addi a0, sp, 15
 ; RV64-NEXT:    vse1.v v25, (a0)
-; RV64-NEXT:    lbu a0, 14(sp)
+; RV64-NEXT:    lbu a0, 15(sp)
 ; RV64-NEXT:    andi a1, a0, 1
 ; RV64-NEXT:    beqz a1, .LBB4_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
@@ -184,9 +184,9 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV32-NEXT:    vslideup.vi v26, v25, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v25, v26, 0
-; RV32-NEXT:    addi a0, sp, 14
+; RV32-NEXT:    addi a0, sp, 15
 ; RV32-NEXT:    vse1.v v25, (a0)
-; RV32-NEXT:    lbu a0, 14(sp)
+; RV32-NEXT:    lbu a0, 15(sp)
 ; RV32-NEXT:    andi a1, a0, 1
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.i v25, 0
@@ -231,9 +231,9 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV64-NEXT:    vslideup.vi v26, v25, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v25, v26, 0
-; RV64-NEXT:    addi a0, sp, 14
+; RV64-NEXT:    addi a0, sp, 15
 ; RV64-NEXT:    vse1.v v25, (a0)
-; RV64-NEXT:    lbu a0, 14(sp)
+; RV64-NEXT:    lbu a0, 15(sp)
 ; RV64-NEXT:    andi a1, a0, 1
 ; RV64-NEXT:    beqz a1, .LBB5_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
@@ -284,9 +284,9 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m
 ; RV32-NEXT:    vslideup.vi v26, v25, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v25, v26, 0
-; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    addi a0, sp, 15
 ; RV32-NEXT:    vse1.v v25, (a0)
-; RV32-NEXT:    lbu a0, 12(sp)
+; RV32-NEXT:    lbu a0, 15(sp)
 ; RV32-NEXT:    andi a1, a0, 1
 ; RV32-NEXT:    bnez a1, .LBB6_5
 ; RV32-NEXT:  # %bb.1: # %else
@@ -361,9 +361,9 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m
 ; RV64-NEXT:    vslideup.vi v26, v25, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v25, v26, 0
-; RV64-NEXT:    addi a0, sp, 12
+; RV64-NEXT:    addi a0, sp, 15
 ; RV64-NEXT:    vse1.v v25, (a0)
-; RV64-NEXT:    lbu a0, 12(sp)
+; RV64-NEXT:    lbu a0, 15(sp)
 ; RV64-NEXT:    andi a1, a0, 1
 ; RV64-NEXT:    bnez a1, .LBB6_5
 ; RV64-NEXT:  # %bb.1: # %else
@@ -444,9 +444,9 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m
 ; RV32-NEXT:    vslideup.vi v26, v25, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v25, v26, 0
-; RV32-NEXT:    addi a0, sp, 14
+; RV32-NEXT:    addi a0, sp, 15
 ; RV32-NEXT:    vse1.v v25, (a0)
-; RV32-NEXT:    lbu a0, 14(sp)
+; RV32-NEXT:    lbu a0, 15(sp)
 ; RV32-NEXT:    andi a1, a0, 1
 ; RV32-NEXT:    bnez a1, .LBB7_3
 ; RV32-NEXT:  # %bb.1: # %else
@@ -489,9 +489,9 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m
 ; RV64-NEXT:    vslideup.vi v26, v25, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v25, v26, 0
-; RV64-NEXT:    addi a0, sp, 14
+; RV64-NEXT:    addi a0, sp, 15
 ; RV64-NEXT:    vse1.v v25, (a0)
-; RV64-NEXT:    lbu a0, 14(sp)
+; RV64-NEXT:    lbu a0, 15(sp)
 ; RV64-NEXT:    andi a1, a0, 1
 ; RV64-NEXT:    bnez a1, .LBB7_3
 ; RV64-NEXT:  # %bb.1: # %else
@@ -543,9 +543,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV32-NEXT:    vslideup.vi v26, v25, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v25, v26, 0
-; RV32-NEXT:    addi a2, sp, 14
+; RV32-NEXT:    addi a2, sp, 15
 ; RV32-NEXT:    vse1.v v25, (a2)
-; RV32-NEXT:    lbu a2, 14(sp)
+; RV32-NEXT:    lbu a2, 15(sp)
 ; RV32-NEXT:    andi a3, a2, 1
 ; RV32-NEXT:    beqz a3, .LBB8_2
 ; RV32-NEXT:  # %bb.1: # %cond.load
@@ -603,9 +603,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV64-NEXT:    vslideup.vi v26, v25, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v25, v26, 0
-; RV64-NEXT:    addi a2, sp, 14
+; RV64-NEXT:    addi a2, sp, 15
 ; RV64-NEXT:    vse1.v v25, (a2)
-; RV64-NEXT:    lbu a2, 14(sp)
+; RV64-NEXT:    lbu a2, 15(sp)
 ; RV64-NEXT:    andi a3, a2, 1
 ; RV64-NEXT:    beqz a3, .LBB8_2
 ; RV64-NEXT:  # %bb.1: # %cond.load
@@ -671,9 +671,9 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32>
 ; RV32-NEXT:    vslideup.vi v26, v25, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v25, v26, 0
-; RV32-NEXT:    addi a1, sp, 14
+; RV32-NEXT:    addi a1, sp, 15
 ; RV32-NEXT:    vse1.v v25, (a1)
-; RV32-NEXT:    lbu a1, 14(sp)
+; RV32-NEXT:    lbu a1, 15(sp)
 ; RV32-NEXT:    andi a2, a1, 1
 ; RV32-NEXT:    bnez a2, .LBB9_3
 ; RV32-NEXT:  # %bb.1: # %else
@@ -714,9 +714,9 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32>
 ; RV64-NEXT:    vslideup.vi v26, v25, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v25, v26, 0
-; RV64-NEXT:    addi a1, sp, 14
+; RV64-NEXT:    addi a1, sp, 15
 ; RV64-NEXT:    vse1.v v25, (a1)
-; RV64-NEXT:    lbu a1, 14(sp)
+; RV64-NEXT:    lbu a1, 15(sp)
 ; RV64-NEXT:    andi a2, a1, 1
 ; RV64-NEXT:    bnez a2, .LBB9_3
 ; RV64-NEXT:  # %bb.1: # %else

diff  --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
index 5a3cbe4bfd727..2abf5ef19addd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -820,8 +820,8 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
 ; CHECK-LE-NEXT:    push {r7, lr}
-; CHECK-LE-NEXT:    .pad #8
-; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vldrh.s32 q0, [r1]
 ; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
 ; CHECK-LE-NEXT:    @ implicit-def: $q0
@@ -888,7 +888,7 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
 ; CHECK-LE-NEXT:    itt mi
 ; CHECK-LE-NEXT:    vmovmi r1, s7
 ; CHECK-LE-NEXT:    strmi r1, [r0, #12]
-; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    pop {r7, pc}
 ; CHECK-LE-NEXT:  .LBB18_6: @ %cond.load
 ; CHECK-LE-NEXT:    vldr.16 s0, [r2]
@@ -911,8 +911,8 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
 ; CHECK-BE:       @ %bb.0: @ %entry
 ; CHECK-BE-NEXT:    .save {r7, lr}
 ; CHECK-BE-NEXT:    push {r7, lr}
-; CHECK-BE-NEXT:    .pad #8
-; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    vldrh.s32 q0, [r1]
 ; CHECK-BE-NEXT:    vcmp.s32 gt, q0, zr
 ; CHECK-BE-NEXT:    @ implicit-def: $q0
@@ -979,7 +979,7 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
 ; CHECK-BE-NEXT:    itt ne
 ; CHECK-BE-NEXT:    vmovne r1, s7
 ; CHECK-BE-NEXT:    strne r1, [r0, #12]
-; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r7, pc}
 ; CHECK-BE-NEXT:  .LBB18_6: @ %cond.load
 ; CHECK-BE-NEXT:    vldr.16 s0, [r2]
@@ -1011,8 +1011,8 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
 ; CHECK-LE-NEXT:    push {r7, lr}
-; CHECK-LE-NEXT:    .pad #8
-; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vldrh.s32 q0, [r1]
 ; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
 ; CHECK-LE-NEXT:    @ implicit-def: $q0
@@ -1079,7 +1079,7 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
 ; CHECK-LE-NEXT:    itt mi
 ; CHECK-LE-NEXT:    vmovmi r1, s7
 ; CHECK-LE-NEXT:    strmi r1, [r0, #12]
-; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    pop {r7, pc}
 ; CHECK-LE-NEXT:  .LBB19_6: @ %cond.load
 ; CHECK-LE-NEXT:    vldr.16 s0, [r2]
@@ -1102,8 +1102,8 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
 ; CHECK-BE:       @ %bb.0: @ %entry
 ; CHECK-BE-NEXT:    .save {r7, lr}
 ; CHECK-BE-NEXT:    push {r7, lr}
-; CHECK-BE-NEXT:    .pad #8
-; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    vldrh.s32 q0, [r1]
 ; CHECK-BE-NEXT:    vcmp.s32 gt, q0, zr
 ; CHECK-BE-NEXT:    @ implicit-def: $q0
@@ -1170,7 +1170,7 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
 ; CHECK-BE-NEXT:    itt ne
 ; CHECK-BE-NEXT:    vmovne r1, s7
 ; CHECK-BE-NEXT:    strne r1, [r0, #12]
-; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r7, pc}
 ; CHECK-BE-NEXT:  .LBB19_6: @ %cond.load
 ; CHECK-BE-NEXT:    vldr.16 s0, [r2]

diff  --git a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
index 352a7f797a438..04632f53b3165 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
@@ -557,8 +557,8 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .pad #8
-; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
 ; CHECK-LE-NEXT:    @ implicit-def: $q0
 ; CHECK-LE-NEXT:    vmrs r1, p0
@@ -620,13 +620,13 @@ define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8
 ; CHECK-LE-NEXT:    itt mi
 ; CHECK-LE-NEXT:    ldrhmi r0, [r0, #14]
 ; CHECK-LE-NEXT:    vmovmi.16 q0[7], r0
-; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8i16_align1_undef:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .pad #8
-; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    vrev64.16 q1, q0
 ; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
 ; CHECK-BE-NEXT:    @ implicit-def: $q1
@@ -690,7 +690,7 @@ define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8
 ; CHECK-BE-NEXT:    ldrhne r0, [r0, #14]
 ; CHECK-BE-NEXT:    vmovne.16 q1[7], r0
 ; CHECK-BE-NEXT:    vrev64.16 q0, q1
-; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
@@ -1433,8 +1433,8 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .pad #40
-; CHECK-LE-NEXT:    sub sp, #40
+; CHECK-LE-NEXT:    .pad #36
+; CHECK-LE-NEXT:    sub sp, #36
 ; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
 ; CHECK-LE-NEXT:    @ implicit-def: $q0
 ; CHECK-LE-NEXT:    vmrs r1, p0
@@ -1488,7 +1488,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-LE-NEXT:    lsls r1, r1, #24
 ; CHECK-LE-NEXT:    bmi .LBB45_16
 ; CHECK-LE-NEXT:  .LBB45_8: @ %else20
-; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    add sp, #36
 ; CHECK-LE-NEXT:    bx lr
 ; CHECK-LE-NEXT:  .LBB45_9: @ %cond.load
 ; CHECK-LE-NEXT:    ldrh r2, [r0]
@@ -1546,13 +1546,13 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-LE-NEXT:    strh.w r0, [sp]
 ; CHECK-LE-NEXT:    vldr.16 s4, [sp]
 ; CHECK-LE-NEXT:    vins.f16 s3, s4
-; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    add sp, #36
 ; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8f16_align1_undef:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .pad #40
-; CHECK-BE-NEXT:    sub sp, #40
+; CHECK-BE-NEXT:    .pad #36
+; CHECK-BE-NEXT:    sub sp, #36
 ; CHECK-BE-NEXT:    vrev64.16 q1, q0
 ; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
 ; CHECK-BE-NEXT:    @ implicit-def: $q1
@@ -1613,7 +1613,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    vins.f16 s7, s0
 ; CHECK-BE-NEXT:  .LBB45_9: @ %else20
 ; CHECK-BE-NEXT:    vrev64.16 q0, q1
-; CHECK-BE-NEXT:    add sp, #40
+; CHECK-BE-NEXT:    add sp, #36
 ; CHECK-BE-NEXT:    bx lr
 ; CHECK-BE-NEXT:  .LBB45_10: @ %cond.load
 ; CHECK-BE-NEXT:    ldrh r2, [r0]

diff  --git a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
index 02b29769382b2..c6c989b2ff85e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
@@ -185,8 +185,8 @@ entry:
 define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .pad #8
-; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
 ; CHECK-LE-NEXT:    vmrs r1, p0
 ; CHECK-LE-NEXT:    and r2, r1, #1
@@ -247,13 +247,13 @@ define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a)
 ; CHECK-LE-NEXT:    itt mi
 ; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[7]
 ; CHECK-LE-NEXT:    strhmi r1, [r0, #14]
-; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8i16_align1:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .pad #8
-; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    vrev64.16 q1, q0
 ; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
 ; CHECK-BE-NEXT:    vmrs r1, p0
@@ -315,7 +315,7 @@ define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a)
 ; CHECK-BE-NEXT:    itt ne
 ; CHECK-BE-NEXT:    vmovne.u16 r1, q1[7]
 ; CHECK-BE-NEXT:    strhne r1, [r0, #14]
-; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
@@ -646,8 +646,8 @@ entry:
 define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) {
 ; CHECK-LE-LABEL: masked_v8f16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .pad #40
-; CHECK-LE-NEXT:    sub sp, #40
+; CHECK-LE-NEXT:    .pad #36
+; CHECK-LE-NEXT:    sub sp, #36
 ; CHECK-LE-NEXT:    vcmp.i16 ne, q1, zr
 ; CHECK-LE-NEXT:    movs r2, #0
 ; CHECK-LE-NEXT:    vmrs r1, p0
@@ -700,7 +700,7 @@ define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %
 ; CHECK-LE-NEXT:    lsls r1, r1, #24
 ; CHECK-LE-NEXT:    bmi .LBB16_16
 ; CHECK-LE-NEXT:  .LBB16_8: @ %else14
-; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    add sp, #36
 ; CHECK-LE-NEXT:    bx lr
 ; CHECK-LE-NEXT:  .LBB16_9: @ %cond.store
 ; CHECK-LE-NEXT:    vstr.16 s0, [sp, #28]
@@ -752,13 +752,13 @@ define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %
 ; CHECK-LE-NEXT:    vstr.16 s0, [sp]
 ; CHECK-LE-NEXT:    ldrh.w r1, [sp]
 ; CHECK-LE-NEXT:    strh r1, [r0, #14]
-; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    add sp, #36
 ; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8f16_align1:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .pad #40
-; CHECK-BE-NEXT:    sub sp, #40
+; CHECK-BE-NEXT:    .pad #36
+; CHECK-BE-NEXT:    sub sp, #36
 ; CHECK-BE-NEXT:    vrev64.16 q2, q1
 ; CHECK-BE-NEXT:    vrev64.16 q1, q0
 ; CHECK-BE-NEXT:    vcmp.i16 ne, q2, zr
@@ -813,7 +813,7 @@ define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %
 ; CHECK-BE-NEXT:    lsls r1, r1, #31
 ; CHECK-BE-NEXT:    bne .LBB16_16
 ; CHECK-BE-NEXT:  .LBB16_8: @ %else14
-; CHECK-BE-NEXT:    add sp, #40
+; CHECK-BE-NEXT:    add sp, #36
 ; CHECK-BE-NEXT:    bx lr
 ; CHECK-BE-NEXT:  .LBB16_9: @ %cond.store
 ; CHECK-BE-NEXT:    vstr.16 s4, [sp, #28]
@@ -865,7 +865,7 @@ define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %
 ; CHECK-BE-NEXT:    vstr.16 s0, [sp]
 ; CHECK-BE-NEXT:    ldrh.w r1, [sp]
 ; CHECK-BE-NEXT:    strh r1, [r0, #14]
-; CHECK-BE-NEXT:    add sp, #40
+; CHECK-BE-NEXT:    add sp, #36
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp ugt <8 x i16> %b, zeroinitializer

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll b/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
index e10d984dd39fa..97125a337ace7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
@@ -56,8 +56,8 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @bitcast_to_v8i1(i8 %b, <8 x i16> %a) {
 ; CHECK-LE-LABEL: bitcast_to_v8i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .pad #8
-; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    uxtb r0, r0
 ; CHECK-LE-NEXT:    vmov.i8 q1, #0x0
 ; CHECK-LE-NEXT:    vmov.i8 q2, #0xff
@@ -82,13 +82,13 @@ define arm_aapcs_vfpcc <8 x i16> @bitcast_to_v8i1(i8 %b, <8 x i16> %a) {
 ; CHECK-LE-NEXT:    vcmp.i16 ne, q1, zr
 ; CHECK-LE-NEXT:    vmov.i32 q1, #0x0
 ; CHECK-LE-NEXT:    vpsel q0, q0, q1
-; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_to_v8i1:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .pad #8
-; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    uxtb r0, r0
 ; CHECK-BE-NEXT:    vmov.i8 q1, #0x0
 ; CHECK-BE-NEXT:    rbit r0, r0
@@ -118,7 +118,7 @@ define arm_aapcs_vfpcc <8 x i16> @bitcast_to_v8i1(i8 %b, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vrev32.16 q0, q0
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.16 q0, q1
-; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = bitcast i8 %b to <8 x i1>
@@ -129,45 +129,29 @@ entry:
 define arm_aapcs_vfpcc <16 x i8> @bitcast_to_v16i1(i16 %b, <16 x i8> %a) {
 ; CHECK-LE-LABEL: bitcast_to_v16i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
-; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
-; CHECK-LE-NEXT:    .setfp r7, sp, #8
-; CHECK-LE-NEXT:    add r7, sp, #8
-; CHECK-LE-NEXT:    .pad #16
-; CHECK-LE-NEXT:    sub sp, #16
-; CHECK-LE-NEXT:    mov r4, sp
-; CHECK-LE-NEXT:    bfc r4, #0, #4
-; CHECK-LE-NEXT:    mov sp, r4
-; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vmsr p0, r0
 ; CHECK-LE-NEXT:    vmov.i32 q1, #0x0
 ; CHECK-LE-NEXT:    vpsel q0, q0, q1
-; CHECK-LE-NEXT:    mov sp, r4
-; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_to_v16i1:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
-; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
-; CHECK-BE-NEXT:    .setfp r7, sp, #8
-; CHECK-BE-NEXT:    add r7, sp, #8
-; CHECK-BE-NEXT:    .pad #16
-; CHECK-BE-NEXT:    sub sp, #16
-; CHECK-BE-NEXT:    mov r4, sp
-; CHECK-BE-NEXT:    bfc r4, #0, #4
-; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    uxth r0, r0
 ; CHECK-BE-NEXT:    vrev64.8 q1, q0
 ; CHECK-BE-NEXT:    rbit r0, r0
 ; CHECK-BE-NEXT:    vmov.i32 q0, #0x0
-; CHECK-BE-NEXT:    sub.w r4, r7, #8
 ; CHECK-BE-NEXT:    vrev32.8 q0, q0
 ; CHECK-BE-NEXT:    lsrs r0, r0, #16
 ; CHECK-BE-NEXT:    vmsr p0, r0
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.8 q0, q1
-; CHECK-BE-NEXT:    mov sp, r4
-; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
 entry:
   %c = bitcast i16 %b to <16 x i1>
   %s = select <16 x i1> %c, <16 x i8> %a, <16 x i8> zeroinitializer
@@ -266,8 +250,8 @@ entry:
 define arm_aapcs_vfpcc i8 @bitcast_from_v8i1(<8 x i16> %a) {
 ; CHECK-LE-LABEL: bitcast_from_v8i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .pad #8
-; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vcmp.i16 eq, q0, zr
 ; CHECK-LE-NEXT:    vmrs r1, p0
 ; CHECK-LE-NEXT:    and r0, r1, #1
@@ -296,13 +280,13 @@ define arm_aapcs_vfpcc i8 @bitcast_from_v8i1(<8 x i16> %a) {
 ; CHECK-LE-NEXT:    rsbs r1, r1, #0
 ; CHECK-LE-NEXT:    bfi r0, r1, #7, #1
 ; CHECK-LE-NEXT:    uxtb r0, r0
-; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    add sp, #4
 ; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_from_v8i1:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .pad #8
-; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    vrev64.16 q1, q0
 ; CHECK-BE-NEXT:    vcmp.i16 eq, q1, zr
 ; CHECK-BE-NEXT:    vmrs r1, p0
@@ -332,7 +316,7 @@ define arm_aapcs_vfpcc i8 @bitcast_from_v8i1(<8 x i16> %a) {
 ; CHECK-BE-NEXT:    rsbs r1, r1, #0
 ; CHECK-BE-NEXT:    bfi r0, r1, #7, #1
 ; CHECK-BE-NEXT:    uxtb r0, r0
-; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i16> %a, zeroinitializer
@@ -343,41 +327,25 @@ entry:
 define arm_aapcs_vfpcc i16 @bitcast_from_v16i1(<16 x i8> %a) {
 ; CHECK-LE-LABEL: bitcast_from_v16i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
-; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
-; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
-; CHECK-LE-NEXT:    .setfp r7, sp, #8
-; CHECK-LE-NEXT:    add r7, sp, #8
-; CHECK-LE-NEXT:    .pad #16
-; CHECK-LE-NEXT:    sub sp, #16
-; CHECK-LE-NEXT:    mov r4, sp
-; CHECK-LE-NEXT:    bfc r4, #0, #4
-; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
 ; CHECK-LE-NEXT:    vcmp.i8 eq, q0, zr
-; CHECK-LE-NEXT:    sub.w r4, r7, #8
 ; CHECK-LE-NEXT:    vmrs r0, p0
 ; CHECK-LE-NEXT:    uxth r0, r0
-; CHECK-LE-NEXT:    mov sp, r4
-; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_from_v16i1:
 ; CHECK-BE:       @ %bb.0: @ %entry
-; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
-; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
-; CHECK-BE-NEXT:    .setfp r7, sp, #8
-; CHECK-BE-NEXT:    add r7, sp, #8
-; CHECK-BE-NEXT:    .pad #16
-; CHECK-BE-NEXT:    sub sp, #16
-; CHECK-BE-NEXT:    mov r4, sp
-; CHECK-BE-NEXT:    bfc r4, #0, #4
-; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
 ; CHECK-BE-NEXT:    vrev64.8 q1, q0
-; CHECK-BE-NEXT:    sub.w r4, r7, #8
 ; CHECK-BE-NEXT:    vcmp.i8 eq, q1, zr
 ; CHECK-BE-NEXT:    vmrs r0, p0
 ; CHECK-BE-NEXT:    rbit r0, r0
 ; CHECK-BE-NEXT:    lsrs r0, r0, #16
-; CHECK-BE-NEXT:    mov sp, r4
-; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp eq <16 x i8> %a, zeroinitializer
   %b = bitcast <16 x i1> %c to i16

diff  --git a/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll b/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
index a7bf312acc1cf..8585cb1d25ee3 100644
--- a/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
+++ b/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
@@ -18,7 +18,8 @@ define void @load_v8i1_broadcast_4_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x doub
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $4, %k0, %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
@@ -47,7 +48,8 @@ define void @load_v8i1_broadcast_7_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x doub
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $6, %k0, %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm2, %xmm2 {%k1} {z}
@@ -678,7 +680,8 @@ define void @load_v2i1_broadcast_1_v1i1_store(<2 x i1>* %a0,<1 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v2i1_broadcast_1_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $1, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftlw $15, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -770,7 +773,8 @@ define void @load_v4i1_broadcast_2_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v4i1_broadcast_2_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $2, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftlw $15, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -794,7 +798,8 @@ define void @load_v4i1_broadcast_3_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v4i1_broadcast_3_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $3, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftlw $15, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -818,7 +823,8 @@ define void @load_v8i1_broadcast_4_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $4, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftlw $15, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -843,7 +849,8 @@ define void @load_v8i1_broadcast_4_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $4, %k0, %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -869,7 +876,8 @@ define void @load_v8i1_broadcast_7_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v1i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $7, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftlw $15, %k0, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -894,7 +902,8 @@ define void @load_v8i1_broadcast_7_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
 ;
 ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1_store:
 ; AVX512NOTDQ:       # %bb.0:
-; AVX512NOTDQ-NEXT:    kmovw (%rdi), %k0
+; AVX512NOTDQ-NEXT:    movzbl (%rdi), %eax
+; AVX512NOTDQ-NEXT:    kmovd %eax, %k0
 ; AVX512NOTDQ-NEXT:    kshiftrw $6, %k0, %k1
 ; AVX512NOTDQ-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512NOTDQ-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}

diff  --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index c6535a2a00435..6ebe6a36398a6 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -459,13 +459,13 @@ define i8 @conv1(<8 x i1>* %R) {
 ;
 ; X86-LABEL: conv1:
 ; X86:       ## %bb.0: ## %entry
-; X86-NEXT:    subl $12, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movb $-1, (%eax)
-; X86-NEXT:    movb $-2, (%esp)
+; X86-NEXT:    movb $-2, {{[0-9]+}}(%esp)
 ; X86-NEXT:    movb $-2, %al
-; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    popl %ecx
 ; X86-NEXT:    retl
 entry:
   store <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1>* %R
@@ -2291,7 +2291,8 @@ End:
 define <8 x i64> @load_8i1(<8 x i1>* %a) {
 ; KNL-LABEL: load_8i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    kmovw (%rdi), %k1
+; KNL-NEXT:    movzbl (%rdi), %eax
+; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
@@ -2303,7 +2304,8 @@ define <8 x i64> @load_8i1(<8 x i1>* %a) {
 ;
 ; AVX512BW-LABEL: load_8i1:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    kmovw (%rdi), %k1
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    retq
 ;
@@ -2363,7 +2365,8 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
 define <2 x i16> @load_2i1(<2 x i1>* %a) {
 ; KNL-LABEL: load_2i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    kmovw (%rdi), %k1
+; KNL-NEXT:    movzbl (%rdi), %eax
+; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    vpmovdw %zmm0, %ymm0
 ; KNL-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -2378,7 +2381,8 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
 ;
 ; AVX512BW-LABEL: load_2i1:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    kmovw (%rdi), %k0
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -2407,7 +2411,8 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
 define <4 x i16> @load_4i1(<4 x i1>* %a) {
 ; KNL-LABEL: load_4i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    kmovw (%rdi), %k1
+; KNL-NEXT:    movzbl (%rdi), %eax
+; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    vpmovdw %zmm0, %ymm0
 ; KNL-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -2422,7 +2427,8 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
 ;
 ; AVX512BW-LABEL: load_4i1:
 ; AVX512BW:       ## %bb.0:
-; AVX512BW-NEXT:    kmovw (%rdi), %k0
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    ## kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/avx512-select.ll b/llvm/test/CodeGen/X86/avx512-select.ll
index 8eadc360d9395..c64a097c3b26d 100644
--- a/llvm/test/CodeGen/X86/avx512-select.ll
+++ b/llvm/test/CodeGen/X86/avx512-select.ll
@@ -153,8 +153,10 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ; X86-AVX512F:       # %bb.0:
 ; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512F-NEXT:    kmovw (%ecx), %k0
-; X86-AVX512F-NEXT:    kmovw (%eax), %k1
+; X86-AVX512F-NEXT:    movzbl (%ecx), %ecx
+; X86-AVX512F-NEXT:    kmovw %ecx, %k0
+; X86-AVX512F-NEXT:    movzbl (%eax), %eax
+; X86-AVX512F-NEXT:    kmovw %eax, %k1
 ; X86-AVX512F-NEXT:    korw %k1, %k0, %k0
 ; X86-AVX512F-NEXT:    kmovw %k0, %eax
 ; X86-AVX512F-NEXT:    # kill: def $al killed $al killed $eax
@@ -162,8 +164,10 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ;
 ; X64-AVX512F-LABEL: select05_mem:
 ; X64-AVX512F:       # %bb.0:
-; X64-AVX512F-NEXT:    kmovw (%rsi), %k0
-; X64-AVX512F-NEXT:    kmovw (%rdi), %k1
+; X64-AVX512F-NEXT:    movzbl (%rsi), %eax
+; X64-AVX512F-NEXT:    kmovw %eax, %k0
+; X64-AVX512F-NEXT:    movzbl (%rdi), %eax
+; X64-AVX512F-NEXT:    kmovw %eax, %k1
 ; X64-AVX512F-NEXT:    korw %k1, %k0, %k0
 ; X64-AVX512F-NEXT:    kmovw %k0, %eax
 ; X64-AVX512F-NEXT:    # kill: def $al killed $al killed $eax
@@ -173,8 +177,10 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ; X86-AVX512BW:       # %bb.0:
 ; X86-AVX512BW-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512BW-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512BW-NEXT:    kmovw (%ecx), %k0
-; X86-AVX512BW-NEXT:    kmovw (%eax), %k1
+; X86-AVX512BW-NEXT:    movzbl (%ecx), %ecx
+; X86-AVX512BW-NEXT:    kmovd %ecx, %k0
+; X86-AVX512BW-NEXT:    movzbl (%eax), %eax
+; X86-AVX512BW-NEXT:    kmovd %eax, %k1
 ; X86-AVX512BW-NEXT:    korw %k1, %k0, %k0
 ; X86-AVX512BW-NEXT:    kmovd %k0, %eax
 ; X86-AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
@@ -182,8 +188,10 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ;
 ; X64-AVX512BW-LABEL: select05_mem:
 ; X64-AVX512BW:       # %bb.0:
-; X64-AVX512BW-NEXT:    kmovw (%rsi), %k0
-; X64-AVX512BW-NEXT:    kmovw (%rdi), %k1
+; X64-AVX512BW-NEXT:    movzbl (%rsi), %eax
+; X64-AVX512BW-NEXT:    kmovd %eax, %k0
+; X64-AVX512BW-NEXT:    movzbl (%rdi), %eax
+; X64-AVX512BW-NEXT:    kmovd %eax, %k1
 ; X64-AVX512BW-NEXT:    korw %k1, %k0, %k0
 ; X64-AVX512BW-NEXT:    kmovd %k0, %eax
 ; X64-AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
@@ -220,8 +228,10 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ; X86-AVX512F:       # %bb.0:
 ; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512F-NEXT:    kmovw (%ecx), %k0
-; X86-AVX512F-NEXT:    kmovw (%eax), %k1
+; X86-AVX512F-NEXT:    movzbl (%ecx), %ecx
+; X86-AVX512F-NEXT:    kmovw %ecx, %k0
+; X86-AVX512F-NEXT:    movzbl (%eax), %eax
+; X86-AVX512F-NEXT:    kmovw %eax, %k1
 ; X86-AVX512F-NEXT:    kandw %k1, %k0, %k0
 ; X86-AVX512F-NEXT:    kmovw %k0, %eax
 ; X86-AVX512F-NEXT:    # kill: def $al killed $al killed $eax
@@ -229,8 +239,10 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ;
 ; X64-AVX512F-LABEL: select06_mem:
 ; X64-AVX512F:       # %bb.0:
-; X64-AVX512F-NEXT:    kmovw (%rsi), %k0
-; X64-AVX512F-NEXT:    kmovw (%rdi), %k1
+; X64-AVX512F-NEXT:    movzbl (%rsi), %eax
+; X64-AVX512F-NEXT:    kmovw %eax, %k0
+; X64-AVX512F-NEXT:    movzbl (%rdi), %eax
+; X64-AVX512F-NEXT:    kmovw %eax, %k1
 ; X64-AVX512F-NEXT:    kandw %k1, %k0, %k0
 ; X64-AVX512F-NEXT:    kmovw %k0, %eax
 ; X64-AVX512F-NEXT:    # kill: def $al killed $al killed $eax
@@ -240,8 +252,10 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ; X86-AVX512BW:       # %bb.0:
 ; X86-AVX512BW-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512BW-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512BW-NEXT:    kmovw (%ecx), %k0
-; X86-AVX512BW-NEXT:    kmovw (%eax), %k1
+; X86-AVX512BW-NEXT:    movzbl (%ecx), %ecx
+; X86-AVX512BW-NEXT:    kmovd %ecx, %k0
+; X86-AVX512BW-NEXT:    movzbl (%eax), %eax
+; X86-AVX512BW-NEXT:    kmovd %eax, %k1
 ; X86-AVX512BW-NEXT:    kandw %k1, %k0, %k0
 ; X86-AVX512BW-NEXT:    kmovd %k0, %eax
 ; X86-AVX512BW-NEXT:    # kill: def $al killed $al killed $eax
@@ -249,8 +263,10 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ;
 ; X64-AVX512BW-LABEL: select06_mem:
 ; X64-AVX512BW:       # %bb.0:
-; X64-AVX512BW-NEXT:    kmovw (%rsi), %k0
-; X64-AVX512BW-NEXT:    kmovw (%rdi), %k1
+; X64-AVX512BW-NEXT:    movzbl (%rsi), %eax
+; X64-AVX512BW-NEXT:    kmovd %eax, %k0
+; X64-AVX512BW-NEXT:    movzbl (%rdi), %eax
+; X64-AVX512BW-NEXT:    kmovd %eax, %k1
 ; X64-AVX512BW-NEXT:    kandw %k1, %k0, %k0
 ; X64-AVX512BW-NEXT:    kmovd %k0, %eax
 ; X64-AVX512BW-NEXT:    # kill: def $al killed $al killed $eax

diff  --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index ea49803e5d4f1..bf3ffecb414ff 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -478,19 +478,13 @@ define i16 @bitcast_v32i16_to_v2i16(<32 x i16> %a0) nounwind {
 ;
 ; AVX512-LABEL: bitcast_v32i16_to_v2i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    pushq %rbp
-; AVX512-NEXT:    movq %rsp, %rbp
-; AVX512-NEXT:    andq $-32, %rsp
-; AVX512-NEXT:    subq $32, %rsp
 ; AVX512-NEXT:    vpmovw2m %zmm0, %k0
-; AVX512-NEXT:    kmovd %k0, (%rsp)
-; AVX512-NEXT:    vmovdqa (%rsp), %xmm0
+; AVX512-NEXT:    kmovd %k0, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm0
 ; AVX512-NEXT:    vmovd %xmm0, %ecx
 ; AVX512-NEXT:    vpextrw $1, %xmm0, %eax
 ; AVX512-NEXT:    addl %ecx, %eax
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT:    movq %rbp, %rsp
-; AVX512-NEXT:    popq %rbp
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = icmp slt <32 x i16> %a0, zeroinitializer

diff  --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
index 8484f66246908..b88e7a03ab0b1 100644
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -4,8 +4,6 @@
 define void @_start() {
 ; CHECK-LABEL: _start:
 ; CHECK:       # %bb.0: # %Entry
-; CHECK-NEXT:    pushq %rax
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT:    shrdq $2, %rcx, %rax
@@ -16,8 +14,6 @@ define void @_start() {
 ; CHECK-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    orq $-2, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq $-1, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    popq %rax
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    retq
 Entry:
   %y = alloca <3 x i129>, align 4

diff  --git a/llvm/test/CodeGen/X86/pr41619.ll b/llvm/test/CodeGen/X86/pr41619.ll
index 87c6292709037..c7678bdef35c7 100644
--- a/llvm/test/CodeGen/X86/pr41619.ll
+++ b/llvm/test/CodeGen/X86/pr41619.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx2 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK
 
 define void @foo(double %arg) {
 ; CHECK-LABEL: foo:
@@ -29,23 +29,11 @@ bb:
 
 ; This used to crash with mask registers on avx512bw targets.
 define i32 @bar(double %blah) nounwind {
-; AVX-LABEL: bar:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    pushq %rbp
-; AVX-NEXT:    movq %rsp, %rbp
-; AVX-NEXT:    andq $-32, %rsp
-; AVX-NEXT:    subq $32, %rsp
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    ## kill: def $eax killed $eax killed $rax
-; AVX-NEXT:    movq %rbp, %rsp
-; AVX-NEXT:    popq %rbp
-; AVX-NEXT:    retq
-;
-; AVX512-LABEL: bar:
-; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vmovq %xmm0, %rax
-; AVX512-NEXT:    ## kill: def $eax killed $eax killed $rax
-; AVX512-NEXT:    retq
+; CHECK-LABEL: bar:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vmovq %xmm0, %rax
+; CHECK-NEXT:    ## kill: def $eax killed $eax killed $rax
+; CHECK-NEXT:    retq
   %z = bitcast double %blah to i64
   %y = trunc i64 %z to i32
   %a = bitcast i32 %y to <32 x i1>

diff  --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 3ae5f09a146fe..97ba377a1ab15 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -1382,13 +1382,23 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: load_sext_2i1_to_2i64:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    kmovw (%rdi), %k1
-; AVX512-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: load_sext_2i1_to_2i64:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    movzbl (%rdi), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: load_sext_2i1_to_2i64:
+; AVX512BW:       # %bb.0: # %entry
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
 ;
 ; X86-SSE2-LABEL: load_sext_2i1_to_2i64:
 ; X86-SSE2:       # %bb.0: # %entry
@@ -1619,13 +1629,23 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
 ; AVX2-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: load_sext_4i1_to_4i32:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    kmovw (%rdi), %k1
-; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: load_sext_4i1_to_4i32:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    movzbl (%rdi), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: load_sext_4i1_to_4i32:
+; AVX512BW:       # %bb.0: # %entry
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
 ;
 ; X86-SSE2-LABEL: load_sext_4i1_to_4i32:
 ; X86-SSE2:       # %bb.0: # %entry
@@ -1882,12 +1902,21 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: load_sext_4i1_to_4i64:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    kmovw (%rdi), %k1
-; AVX512-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: load_sext_4i1_to_4i64:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    movzbl (%rdi), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: load_sext_4i1_to_4i64:
+; AVX512BW:       # %bb.0: # %entry
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
 ;
 ; X86-SSE2-LABEL: load_sext_4i1_to_4i64:
 ; X86-SSE2:       # %bb.0: # %entry
@@ -2105,7 +2134,8 @@ define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
 define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_8i1_to_8i16:
 ; SSE:       # %bb.0: # %entry
-; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movzbl (%rdi), %eax
+; SSE-NEXT:    movd %eax, %xmm0
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -2115,7 +2145,8 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_8i1_to_8i16:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    movzbl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -2133,7 +2164,8 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ;
 ; AVX512F-LABEL: load_sext_8i1_to_8i16:
 ; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    kmovw (%rdi), %k1
+; AVX512F-NEXT:    movzbl (%rdi), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -2142,7 +2174,8 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ;
 ; AVX512BW-LABEL: load_sext_8i1_to_8i16:
 ; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    kmovw (%rdi), %k0
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT:    vzeroupper
@@ -2151,7 +2184,8 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ; X86-SSE-LABEL: load_sext_8i1_to_8i16:
 ; X86-SSE:       # %bb.0: # %entry
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movzbl (%eax), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
 ; X86-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -2316,7 +2350,8 @@ entry:
 define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_8i1_to_8i32:
 ; SSE:       # %bb.0: # %entry
-; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movzbl (%rdi), %eax
+; SSE-NEXT:    movd %eax, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8]
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
@@ -2329,8 +2364,9 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_8i1_to_8i32:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT:    movzbl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -2347,17 +2383,27 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
 ; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: load_sext_8i1_to_8i32:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    kmovw (%rdi), %k1
-; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: load_sext_8i1_to_8i32:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    movzbl (%rdi), %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: load_sext_8i1_to_8i32:
+; AVX512BW:       # %bb.0: # %entry
+; AVX512BW-NEXT:    movzbl (%rdi), %eax
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT:    retq
 ;
 ; X86-SSE-LABEL: load_sext_8i1_to_8i32:
 ; X86-SSE:       # %bb.0: # %entry
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movzbl (%eax), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8]
 ; X86-SSE-NEXT:    movdqa %xmm1, %xmm0
@@ -2443,7 +2489,8 @@ entry:
 define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ; SSE2-LABEL: load_sext_16i1_to_16i8:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movzwl (%rdi), %eax
+; SSE2-NEXT:    movd %eax, %xmm0
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2454,7 +2501,8 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ;
 ; SSSE3-LABEL: load_sext_16i1_to_16i8:
 ; SSSE3:       # %bb.0: # %entry
-; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movzwl (%rdi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; SSSE3-NEXT:    pand %xmm1, %xmm0
@@ -2463,7 +2511,8 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ;
 ; SSE41-LABEL: load_sext_16i1_to_16i8:
 ; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT:    movzwl (%rdi), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; SSE41-NEXT:    pand %xmm1, %xmm0
@@ -2472,7 +2521,8 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ;
 ; AVX1-LABEL: load_sext_16i1_to_16i8:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX1-NEXT:    # xmm1 = mem[0,0]
@@ -2482,7 +2532,8 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ;
 ; AVX2-LABEL: load_sext_16i1_to_16i8:
 ; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    movzwl (%rdi), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -2508,7 +2559,8 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ; X86-SSE2-LABEL: load_sext_16i1_to_16i8:
 ; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT:    movzwl (%eax), %eax
+; X86-SSE2-NEXT:    movd %eax, %xmm0
 ; X86-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2520,7 +2572,8 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ; X86-SSE41-LABEL: load_sext_16i1_to_16i8:
 ; X86-SSE41:       # %bb.0: # %entry
 ; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    movzwl (%eax), %eax
+; X86-SSE41-NEXT:    movd %eax, %xmm0
 ; X86-SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; X86-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; X86-SSE41-NEXT:    pand %xmm1, %xmm0
@@ -2535,7 +2588,8 @@ entry:
 define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_16i1_to_16i16:
 ; SSE:       # %bb.0: # %entry
-; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movzwl (%rdi), %eax
+; SSE-NEXT:    movd %eax, %xmm0
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
@@ -2549,7 +2603,8 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_16i1_to_16i16:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2585,7 +2640,8 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
 ; X86-SSE-LABEL: load_sext_16i1_to_16i16:
 ; X86-SSE:       # %bb.0: # %entry
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movzwl (%eax), %eax
+; X86-SSE-NEXT:    movd %eax, %xmm0
 ; X86-SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]

diff  --git a/llvm/test/Transforms/InstCombine/abs-intrinsic.ll b/llvm/test/Transforms/InstCombine/abs-intrinsic.ll
index 56ba17fe07385..058289d8e6317 100644
--- a/llvm/test/Transforms/InstCombine/abs-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/abs-intrinsic.ll
@@ -402,7 +402,7 @@ define i32 @srem_by_2_int_min_is_poison(i32 %x) {
 define <3 x i82> @srem_by_2(<3 x i82> %x, <3 x i82>* %p) {
 ; CHECK-LABEL: @srem_by_2(
 ; CHECK-NEXT:    [[S:%.*]] = srem <3 x i82> [[X:%.*]], <i82 2, i82 2, i82 2>
-; CHECK-NEXT:    store <3 x i82> [[S]], <3 x i82>* [[P:%.*]], align 64
+; CHECK-NEXT:    store <3 x i82> [[S]], <3 x i82>* [[P:%.*]], align 32
 ; CHECK-NEXT:    [[R:%.*]] = and <3 x i82> [[X]], <i82 1, i82 1, i82 1>
 ; CHECK-NEXT:    ret <3 x i82> [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/icmp-vec.ll b/llvm/test/Transforms/InstCombine/icmp-vec.ll
index ecd14e594d736..3a94c3888ca24 100644
--- a/llvm/test/Transforms/InstCombine/icmp-vec.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-vec.ll
@@ -506,7 +506,7 @@ define i1 @not_cast_ne-1(<3 x i1> %x) {
 define i1 @not_cast_ne-1_uses(<3 x i2> %x, <3 x i2>* %p) {
 ; CHECK-LABEL: @not_cast_ne-1_uses(
 ; CHECK-NEXT:    [[NOT:%.*]] = xor <3 x i2> [[X:%.*]], <i2 -1, i2 -1, i2 -1>
-; CHECK-NEXT:    store <3 x i2> [[NOT]], <3 x i2>* [[P:%.*]], align 4
+; CHECK-NEXT:    store <3 x i2> [[NOT]], <3 x i2>* [[P:%.*]], align 1
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <3 x i2> [[X]] to i6
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i6 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
@@ -553,7 +553,7 @@ define i1 @eq_cast_eq1(<2 x i4> %x, <2 x i4> %y) {
 define i1 @eq_cast_eq-1_use1(<2 x i4> %x, <2 x i4> %y, <2 x i1>* %p) {
 ; CHECK-LABEL: @eq_cast_eq-1_use1(
 ; CHECK-NEXT:    [[IC:%.*]] = icmp sgt <2 x i4> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    store <2 x i1> [[IC]], <2 x i1>* [[P:%.*]], align 2
+; CHECK-NEXT:    store <2 x i1> [[IC]], <2 x i1>* [[P:%.*]], align 1
 ; CHECK-NEXT:    [[B:%.*]] = bitcast <2 x i1> [[IC]] to i2
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i2 [[B]], -1
 ; CHECK-NEXT:    ret i1 [[R]]

diff  --git a/llvm/test/Transforms/InstCombine/select-min-max.ll b/llvm/test/Transforms/InstCombine/select-min-max.ll
index ae6ee31705918..79dbe40f20c6a 100644
--- a/llvm/test/Transforms/InstCombine/select-min-max.ll
+++ b/llvm/test/Transforms/InstCombine/select-min-max.ll
@@ -48,7 +48,7 @@ define i5 @umin_umin_common_op_10(i1 %cond, i5 %x, i5 %y, i5 %z, i5* %p) {
 define <3 x i5> @umax_umax_common_op_11(i1 %cond, <3 x i5> %x, <3 x i5> %y, <3 x i5> %z, <3 x i5>* %p) {
 ; CHECK-LABEL: @umax_umax_common_op_11(
 ; CHECK-NEXT:    [[M2:%.*]] = call <3 x i5> @llvm.umax.v3i5(<3 x i5> [[Y:%.*]], <3 x i5> [[Z:%.*]])
-; CHECK-NEXT:    store <3 x i5> [[M2]], <3 x i5>* [[P:%.*]], align 4
+; CHECK-NEXT:    store <3 x i5> [[M2]], <3 x i5>* [[P:%.*]], align 2
 ; CHECK-NEXT:    [[MINMAXOP:%.*]] = select i1 [[COND:%.*]], <3 x i5> [[X:%.*]], <3 x i5> [[Y]]
 ; CHECK-NEXT:    [[SEL:%.*]] = call <3 x i5> @llvm.umax.v3i5(<3 x i5> [[MINMAXOP]], <3 x i5> [[Z]])
 ; CHECK-NEXT:    ret <3 x i5> [[SEL]]

diff  --git a/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll b/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
index ba57de358aa46..5f6e1f18695f8 100644
--- a/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
@@ -197,7 +197,7 @@ define <2 x i4> @shuf_bitcast_insert_use2(<2 x i8> %v, i8 %x, <4 x i4>* %p) {
 ; CHECK-LABEL: @shuf_bitcast_insert_use2(
 ; CHECK-NEXT:    [[I:%.*]] = insertelement <2 x i8> [[V:%.*]], i8 [[X:%.*]], i32 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i4>* [[P:%.*]] to <2 x i8>*
-; CHECK-NEXT:    store <2 x i8> [[I]], <2 x i8>* [[TMP1]], align 4
+; CHECK-NEXT:    store <2 x i8> [[I]], <2 x i8>* [[TMP1]], align 2
 ; CHECK-NEXT:    [[R:%.*]] = bitcast i8 [[X]] to <2 x i4>
 ; CHECK-NEXT:    ret <2 x i4> [[R]]
 ;

diff  --git a/llvm/test/Transforms/SROA/vector-promotion-different-size.ll b/llvm/test/Transforms/SROA/vector-promotion-different-size.ll
index 56e1f1f2160a7..d5c69410330c8 100644
--- a/llvm/test/Transforms/SROA/vector-promotion-different-size.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion-different-size.ll
@@ -3,7 +3,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
 
 define <4 x i1> @vector_bitcast() {
   ; CHECK-LABEL: @vector_bitcast
-  ; CHECK: alloca i1
+  ; CHECK: alloca <3 x i1>
 
     %a = alloca <3 x i1>
     store <3 x i1> <i1 1,i1 0,i1 1>, <3 x i1>* %a

diff  --git a/llvm/test/Transforms/VectorCombine/load-insert-store.ll b/llvm/test/Transforms/VectorCombine/load-insert-store.ll
index 6218ecd10c7dc..39893b4298bda 100644
--- a/llvm/test/Transforms/VectorCombine/load-insert-store.ll
+++ b/llvm/test/Transforms/VectorCombine/load-insert-store.ll
@@ -64,7 +64,7 @@ entry:
 define void @insert_store_v9i4(<9 x i4>* %q, i4 zeroext %s) {
 ; CHECK-LABEL: @insert_store_v9i4(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <9 x i4>, <9 x i4>* [[Q:%.*]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = load <9 x i4>, <9 x i4>* [[Q:%.*]], align 8
 ; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <9 x i4> [[TMP0]], i4 [[S:%.*]], i32 3
 ; CHECK-NEXT:    store <9 x i4> [[VECINS]], <9 x i4>* [[Q]], align 1
 ; CHECK-NEXT:    ret void

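For readers who want to see what the new default looks like in their own tree, here is a minimal sketch (not part of this commit) that asks DataLayout for the ABI alignment of small i1 vectors. The empty layout string and the expectation that the reported alignment now tracks the power-of-two-rounded store size are assumptions inferred from the test updates above, not something the patch itself documents.

// Minimal sketch, assuming an LLVM build with the patched DataLayout defaults.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  // Empty layout string: no explicit vector-alignment entries, so the
  // fallback rules in DataLayout apply.
  DataLayout DL("");

  for (unsigned N : {2, 4, 8, 16}) {
    auto *VT = FixedVectorType::get(Type::getInt1Ty(Ctx), N);
    outs() << "<" << N << " x i1> ABI align = "
           << DL.getABITypeAlign(VT).value() << "\n";
  }
  return 0;
}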