[clang] ab2ed8b - [SVE] Regenerate sve vector bits tests. NFC

David Green via cfe-commits cfe-commits at lists.llvm.org
Fri Sep 11 10:52:15 PDT 2020


Author: David Green
Date: 2020-09-11T18:51:57+01:00
New Revision: ab2ed8bce9e924a2fc734ca4369419c18d124043

URL: https://github.com/llvm/llvm-project/commit/ab2ed8bce9e924a2fc734ca4369419c18d124043
DIFF: https://github.com/llvm/llvm-project/commit/ab2ed8bce9e924a2fc734ca4369419c18d124043.diff

LOG: [SVE] Regenerate sve vector bits tests. NFC
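
The regenerated CHECK lines replace hard-coded metadata references such as "!tbaa !2" with FileCheck pattern captures such as "[[TBAA2:!tbaa !.*]]", so the tests no longer depend on the exact metadata numbering clang emits. CHECK lines of this form are normally produced by clang's update_cc_test_checks.py utility; a minimal sketch of the regeneration command, assuming an llvm-project checkout with a locally built clang (the exact invocation used for this commit is an assumption):

    python llvm/utils/update_cc_test_checks.py --clang build/bin/clang \
        clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c \
        clang/test/CodeGen/attr-arm-sve-vector-bits-call.c \
        clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c \
        clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c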

Added: 
    

Modified: 
    clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
    clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
index cab424c3dbe1..84559e9edb9a 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
@@ -31,21 +31,21 @@ DEFINE_STRUCT(bool)
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64>* [[ARRAYIDX]] to <vscale x 2 x i64>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA2:!tbaa !.*]]
 // CHECK-128-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_int64(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <4 x i64>* [[ARRAYIDX]] to <vscale x 2 x i64>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA2:!tbaa !.*]]
 // CHECK-256-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_int64(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <8 x i64>* [[ARRAYIDX]] to <vscale x 2 x i64>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA2:!tbaa !.*]]
 // CHECK-512-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 svint64_t read_int64(struct struct_int64 *s) {
@@ -55,31 +55,31 @@ svint64_t read_int64(struct struct_int64 *s) {
 // CHECK-128-LABEL: @write_int64(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-128-NEXT:    store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, !tbaa !5
+// CHECK-128-NEXT:    store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[X_ADDR]] to <2 x i64>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT:    store <2 x i64> [[TMP1]], <2 x i64>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-128-NEXT:    store <2 x i64> [[TMP1]], <2 x i64>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-256-LABEL: @write_int64(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-256-NEXT:    store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, !tbaa !5
+// CHECK-256-NEXT:    store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[X_ADDR]] to <4 x i64>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT:    store <4 x i64> [[TMP1]], <4 x i64>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-256-NEXT:    store <4 x i64> [[TMP1]], <4 x i64>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_int64(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-512-NEXT:    store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, !tbaa !5
+// CHECK-512-NEXT:    store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[X_ADDR]] to <8 x i64>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT:    store <8 x i64> [[TMP1]], <8 x i64>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-512-NEXT:    store <8 x i64> [[TMP1]], <8 x i64>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    ret void
 //
 void write_int64(struct struct_int64 *s, svint64_t x) {
@@ -94,21 +94,21 @@ void write_int64(struct struct_int64 *s, svint64_t x) {
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <2 x double>* [[ARRAYIDX]] to <vscale x 2 x double>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_float64(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <4 x double>* [[ARRAYIDX]] to <vscale x 2 x double>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_float64(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[ARRAYIDX]] to <vscale x 2 x double>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 svfloat64_t read_float64(struct struct_float64 *s) {
@@ -118,31 +118,31 @@ svfloat64_t read_float64(struct struct_float64 *s) {
 // CHECK-128-LABEL: @write_float64(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-128-NEXT:    store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, !tbaa !7
+// CHECK-128-NEXT:    store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[X_ADDR]] to <2 x double>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT:    store <2 x double> [[TMP1]], <2 x double>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-128-NEXT:    store <2 x double> [[TMP1]], <2 x double>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-256-LABEL: @write_float64(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-256-NEXT:    store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, !tbaa !7
+// CHECK-256-NEXT:    store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[X_ADDR]] to <4 x double>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT:    store <4 x double> [[TMP1]], <4 x double>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-256-NEXT:    store <4 x double> [[TMP1]], <4 x double>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_float64(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-512-NEXT:    store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, !tbaa !7
+// CHECK-512-NEXT:    store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[X_ADDR]] to <8 x double>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT:    store <8 x double> [[TMP1]], <8 x double>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-512-NEXT:    store <8 x double> [[TMP1]], <8 x double>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    ret void
 //
 void write_float64(struct struct_float64 *s, svfloat64_t x) {
@@ -157,21 +157,21 @@ void write_float64(struct struct_float64 *s, svfloat64_t x) {
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat>* [[ARRAYIDX]] to <vscale x 8 x bfloat>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    ret <vscale x 8 x bfloat> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bfloat16(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <16 x bfloat>* [[ARRAYIDX]] to <vscale x 8 x bfloat>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    ret <vscale x 8 x bfloat> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bfloat16(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <32 x bfloat>* [[ARRAYIDX]] to <vscale x 8 x bfloat>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    ret <vscale x 8 x bfloat> [[TMP1]]
 //
 svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
@@ -181,31 +181,31 @@ svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
 // CHECK-128-LABEL: @write_bfloat16(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-128-NEXT:    store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, !tbaa !9
+// CHECK-128-NEXT:    store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[X_ADDR]] to <8 x bfloat>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT:    store <8 x bfloat> [[TMP1]], <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-128-NEXT:    store <8 x bfloat> [[TMP1]], <8 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-256-LABEL: @write_bfloat16(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-256-NEXT:    store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, !tbaa !9
+// CHECK-256-NEXT:    store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[X_ADDR]] to <16 x bfloat>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT:    store <16 x bfloat> [[TMP1]], <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-256-NEXT:    store <16 x bfloat> [[TMP1]], <16 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_bfloat16(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-512-NEXT:    store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, !tbaa !9
+// CHECK-512-NEXT:    store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[X_ADDR]] to <32 x bfloat>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT:    store <32 x bfloat> [[TMP1]], <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa !2
+// CHECK-512-NEXT:    store <32 x bfloat> [[TMP1]], <32 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    ret void
 //
 void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
@@ -220,21 +220,21 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <2 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA2]]
 // CHECK-128-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bool(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <4 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA2]]
 // CHECK-256-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bool(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA2]]
 // CHECK-512-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_bool(struct struct_bool *s) {
@@ -244,33 +244,33 @@ svbool_t read_bool(struct struct_bool *s) {
 // CHECK-128-LABEL: @write_bool(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, !tbaa !11
+// CHECK-128-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to <2 x i8>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, !tbaa !2
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-128-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT:    store <2 x i8> [[TMP1]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa !2
+// CHECK-128-NEXT:    store <2 x i8> [[TMP1]], <2 x i8>* [[ARRAYIDX]], align 2, [[TBAA2]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-256-LABEL: @write_bool(
 // CHECK-256-NEXT:  entry:
 // CHECK-256-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-256-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, !tbaa !11
+// CHECK-256-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-256-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to i32*
-// CHECK-256-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 16, !tbaa !2
+// CHECK-256-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-256-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1
 // CHECK-256-NEXT:    [[TMP2:%.*]] = bitcast [3 x <4 x i8>]* [[Y]] to i32*
-// CHECK-256-NEXT:    store i32 [[TMP1]], i32* [[TMP2]], align 2, !tbaa !2
+// CHECK-256-NEXT:    store i32 [[TMP1]], i32* [[TMP2]], align 2, [[TBAA2]]
 // CHECK-256-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_bool(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, !tbaa !11
+// CHECK-512-NEXT:    store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to i64*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, !tbaa !2
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-512-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1
 // CHECK-512-NEXT:    [[TMP2:%.*]] = bitcast [3 x <8 x i8>]* [[Y]] to i64*
-// CHECK-512-NEXT:    store i64 [[TMP1]], i64* [[TMP2]], align 2, !tbaa !2
+// CHECK-512-NEXT:    store i64 [[TMP1]], i64* [[TMP2]], align 2, [[TBAA2]]
 // CHECK-512-NEXT:    ret void
 //
 void write_bool(struct struct_bool *s, svbool_t x) {

diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
index 490ec92dfdeb..1c08e46681fb 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -30,13 +30,13 @@ svint32_t sizeless_callee(svint32_t x) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[X1]], <16 x i32>* [[X_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-NEXT:    store <16 x i32> [[X1]], <16 x i32>* [[X_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[X_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, !tbaa !2
-// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !5
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5:!tbaa !.*]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP3]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -52,7 +52,7 @@ fixed_int32_t fixed_caller(fixed_int32_t x) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, !tbaa !2
+// CHECK-NEXT:    [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[X1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -68,19 +68,19 @@ fixed_int32_t fixed_callee(fixed_int32_t x) {
 // CHECK-NEXT:    [[COERCE_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <16 x i32>, align 64
-// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[X_ADDR]], align 16, !tbaa !5
+// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[X_ADDR]], align 16, [[TBAA5]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[X_ADDR]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[COERCE_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[COERCE_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[COERCE_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[COERCE_COERCE]], align 16
 // CHECK-NEXT:    [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[TMP2]])
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP3]], align 16
-// CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[TMP4]], <16 x i32>* [[SAVED_CALL_RVALUE]], align 64, !tbaa !2
+// CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[TMP4]], <16 x i32>* [[SAVED_CALL_RVALUE]], align 64, [[TBAA2]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <16 x i32>* [[SAVED_CALL_RVALUE]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[CASTFIXEDSVE]], align 64, !tbaa !2
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[CASTFIXEDSVE]], align 64, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP5]]
 //
 svint32_t sizeless_caller(svint32_t x) {
@@ -101,21 +101,21 @@ svint32_t sizeless_caller(svint32_t x) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[OP2]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[OP2_COERCE:%.*]], <vscale x 4 x i32>* [[TMP1]], align 16
-// CHECK-NEXT:    [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[OP22]], <16 x i32>* [[OP2_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[OP22]], <16 x i32>* [[OP2_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i32>* [[OP1_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP2]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP2]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i32>* [[OP2_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP4]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP4]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP6]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP5]])
-// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !5
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP8]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -135,21 +135,21 @@ fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
 // CHECK-NEXT:    store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x double>* [[OP2]] to <vscale x 2 x double>*
 // CHECK-NEXT:    store <vscale x 2 x double> [[OP2_COERCE:%.*]], <vscale x 2 x double>* [[TMP1]], align 16
-// CHECK-NEXT:    [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, !tbaa !2
-// CHECK-NEXT:    store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, !tbaa !2
-// CHECK-NEXT:    store <8 x double> [[OP22]], <8 x double>* [[OP2_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <8 x double> [[OP22]], <8 x double>* [[OP2_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x double>* [[OP1_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP2]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP2]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x double>* [[OP2_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP4]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP4]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP6]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP5]])
-// CHECK-NEXT:    store <vscale x 2 x double> [[TMP7]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !7
+// CHECK-NEXT:    store <vscale x 2 x double> [[TMP7]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
-// CHECK-NEXT:    [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT:    store <8 x double> [[TMP8]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -170,23 +170,23 @@ fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1]] to i64*
-// CHECK-NEXT:    [[OP113:%.*]] = load i64, i64* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP113:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP2]], align 16
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i8>* [[OP2]] to i64*
-// CHECK-NEXT:    [[OP224:%.*]] = load i64, i64* [[TMP3]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP224:%.*]] = load i64, i64* [[TMP3]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to i64*
-// CHECK-NEXT:    store i64 [[OP113]], i64* [[TMP4]], align 16, !tbaa !2
+// CHECK-NEXT:    store i64 [[OP113]], i64* [[TMP4]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to i64*
-// CHECK-NEXT:    store i64 [[OP224]], i64* [[TMP5]], align 16, !tbaa !2
+// CHECK-NEXT:    store i64 [[OP224]], i64* [[TMP5]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP6]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP6]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP8]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP8]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP9]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP10]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !9
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP10]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP11:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to i64*
-// CHECK-NEXT:    [[TMP12:%.*]] = load i64, i64* [[TMP11]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP12:%.*]] = load i64, i64* [[TMP11]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP13:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT:    store i64 [[TMP12]], i64* [[TMP13]], align 16
 // CHECK-NEXT:    [[TMP14:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -208,15 +208,15 @@ fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[OP1_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !5
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP5]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -234,15 +234,15 @@ fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
 // CHECK-NEXT:    store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, !tbaa !2
-// CHECK-NEXT:    store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x double>* [[OP1_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !7
+// CHECK-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA7]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT:    store <8 x double> [[TMP5]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -261,15 +261,15 @@ fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1]] to i64*
-// CHECK-NEXT:    [[OP112:%.*]] = load i64, i64* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[OP112:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to i64*
-// CHECK-NEXT:    store i64 [[OP112]], i64* [[TMP2]], align 16, !tbaa !2
+// CHECK-NEXT:    store i64 [[OP112]], i64* [[TMP2]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !9
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
 // CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to i64*
-// CHECK-NEXT:    [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP8:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT:    store i64 [[TMP7]], i64* [[TMP8]], align 16
 // CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -289,9 +289,9 @@ fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !5
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP2]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -307,9 +307,9 @@ fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !7
+// CHECK-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA7]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT:    store <8 x double> [[TMP2]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -324,9 +324,9 @@ fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
 // CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa !9
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to i64*
-// CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT:    store i64 [[TMP2]], i64* [[TMP3]], align 16
 // CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16

diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
index 13d8f14f991a..18a7e1f1496c 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -16,10 +16,10 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[TYPE1]], <16 x i32>* [[TYPE_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-NEXT:    store <16 x i32> [[TYPE1]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[TYPE_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
 //
 svint32_t to_svint32_t(fixed_int32_t type) {
@@ -30,9 +30,9 @@ svint32_t to_svint32_t(fixed_int32_t type) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, !tbaa !5
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[TYPE_ADDR]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -48,10 +48,10 @@ fixed_int32_t from_svint32_t(svint32_t type) {
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <8 x double>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[TYPE]] to <vscale x 2 x double>*
 // CHECK-NEXT:    store <vscale x 2 x double> [[TYPE_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, !tbaa !2
-// CHECK-NEXT:    store <8 x double> [[TYPE1]], <8 x double>* [[TYPE_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <8 x double> [[TYPE1]], <8 x double>* [[TYPE_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x double>* [[TYPE_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 //
 svfloat64_t to_svfloat64_t(fixed_float64_t type) {
@@ -62,9 +62,9 @@ svfloat64_t to_svfloat64_t(fixed_float64_t type) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT:    store <vscale x 2 x double> [[TYPE:%.*]], <vscale x 2 x double>* [[TYPE_ADDR]], align 16, !tbaa !7
+// CHECK-NEXT:    store <vscale x 2 x double> [[TYPE:%.*]], <vscale x 2 x double>* [[TYPE_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[TYPE_ADDR]] to <8 x double>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT:    store <8 x double> [[TMP1]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -81,11 +81,11 @@ fixed_float64_t from_svfloat64_t(svfloat64_t type) {
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE]] to i64*
-// CHECK-NEXT:    [[TYPE12:%.*]] = load i64, i64* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TYPE12:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to i64*
-// CHECK-NEXT:    store i64 [[TYPE12]], i64* [[TMP2]], align 16, !tbaa !2
+// CHECK-NEXT:    store i64 [[TYPE12]], i64* [[TMP2]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP4]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
@@ -96,9 +96,9 @@ svbool_t to_svbool_t(fixed_bool_t type) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa !9
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to i64*
-// CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT:    store i64 [[TMP1]], i64* [[TMP2]], align 16
 // CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16

diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
index d567c718000c..28464ed4af2b 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
@@ -22,19 +22,19 @@ fixed_bool_t global_bool;
 // CHECK-128-LABEL: @write_global_i64(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[V_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-128-NEXT:    store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, !tbaa !2
+// CHECK-128-NEXT:    store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, [[TBAA2:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[V_ADDR]] to <2 x i64>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, !tbaa !6
-// CHECK-128-NEXT:    store <2 x i64> [[TMP1]], <2 x i64>* @global_i64, align 16, !tbaa !6
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-128-NEXT:    store <2 x i64> [[TMP1]], <2 x i64>* @global_i64, align 16, [[TBAA6]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_global_i64(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[V_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-512-NEXT:    store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, !tbaa !2
+// CHECK-512-NEXT:    store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, [[TBAA2:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[V_ADDR]] to <8 x i64>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, !tbaa !6
-// CHECK-512-NEXT:    store <8 x i64> [[TMP1]], <8 x i64>* @global_i64, align 16, !tbaa !6
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-512-NEXT:    store <8 x i64> [[TMP1]], <8 x i64>* @global_i64, align 16, [[TBAA6]]
 // CHECK-512-NEXT:    ret void
 //
 void write_global_i64(svint64_t v) { global_i64 = v; }
@@ -42,19 +42,19 @@ void write_global_i64(svint64_t v) { global_i64 = v; }
 // CHECK-128-LABEL: @write_global_bf16(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[V_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-128-NEXT:    store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, !tbaa !7
+// CHECK-128-NEXT:    store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[V_ADDR]] to <8 x bfloat>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, !tbaa !6
-// CHECK-128-NEXT:    store <8 x bfloat> [[TMP1]], <8 x bfloat>* @global_bf16, align 16, !tbaa !6
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-128-NEXT:    store <8 x bfloat> [[TMP1]], <8 x bfloat>* @global_bf16, align 16, [[TBAA6]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_global_bf16(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[V_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-512-NEXT:    store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, !tbaa !7
+// CHECK-512-NEXT:    store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[V_ADDR]] to <32 x bfloat>*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, !tbaa !6
-// CHECK-512-NEXT:    store <32 x bfloat> [[TMP1]], <32 x bfloat>* @global_bf16, align 16, !tbaa !6
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-512-NEXT:    store <32 x bfloat> [[TMP1]], <32 x bfloat>* @global_bf16, align 16, [[TBAA6]]
 // CHECK-512-NEXT:    ret void
 //
 void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
@@ -62,19 +62,19 @@ void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
 // CHECK-128-LABEL: @write_global_bool(
 // CHECK-128-NEXT:  entry:
 // CHECK-128-NEXT:    [[V_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, !tbaa !9
+// CHECK-128-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[V_ADDR]] to <2 x i8>*
-// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, !tbaa !6
-// CHECK-128-NEXT:    store <2 x i8> [[TMP1]], <2 x i8>* @global_bool, align 2, !tbaa !6
+// CHECK-128-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-128-NEXT:    store <2 x i8> [[TMP1]], <2 x i8>* @global_bool, align 2, [[TBAA6]]
 // CHECK-128-NEXT:    ret void
 //
 // CHECK-512-LABEL: @write_global_bool(
 // CHECK-512-NEXT:  entry:
 // CHECK-512-NEXT:    [[V_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, !tbaa !9
+// CHECK-512-NEXT:    store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[V_ADDR]] to i64*
-// CHECK-512-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, !tbaa !6
-// CHECK-512-NEXT:    store i64 [[TMP1]], i64* bitcast (<8 x i8>* @global_bool to i64*), align 2, !tbaa !6
+// CHECK-512-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-512-NEXT:    store i64 [[TMP1]], i64* bitcast (<8 x i8>* @global_bool to i64*), align 2, [[TBAA6]]
 // CHECK-512-NEXT:    ret void
 //
 void write_global_bool(svbool_t v) { global_bool = v; }
@@ -85,36 +85,36 @@ void write_global_bool(svbool_t v) { global_bool = v; }
 
 // CHECK-128-LABEL: @read_global_i64(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<2 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, !tbaa !6
+// CHECK-128-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<2 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, [[TBAA6]]
 // CHECK-128-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-512-LABEL: @read_global_i64(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<8 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, !tbaa !6
+// CHECK-512-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<8 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, [[TBAA6]]
 // CHECK-512-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 svint64_t read_global_i64() { return global_i64; }
 
 // CHECK-128-LABEL: @read_global_bf16(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<8 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, !tbaa !6
+// CHECK-128-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<8 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, [[TBAA6]]
 // CHECK-128-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
 //
 // CHECK-512-LABEL: @read_global_bf16(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<32 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, !tbaa !6
+// CHECK-512-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<32 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, [[TBAA6]]
 // CHECK-512-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
 //
 svbfloat16_t read_global_bf16() { return global_bf16; }
 
 // CHECK-128-LABEL: @read_global_bool(
 // CHECK-128-NEXT:  entry:
-// CHECK-128-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<2 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, !tbaa !6
+// CHECK-128-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<2 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA6]]
 // CHECK-128-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 // CHECK-512-LABEL: @read_global_bool(
 // CHECK-512-NEXT:  entry:
-// CHECK-512-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, !tbaa !6
+// CHECK-512-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA6]]
 // CHECK-512-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 svbool_t read_global_bool() { return global_bool; }

