[llvm] 2a9b86c - [SROA] Extend !tbaa.struct test coverage with multiple missing cases.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 15 13:21:50 PST 2024
Author: Florian Hahn
Date: 2024-02-15T21:21:18Z
New Revision: 2a9b86cc10c3883cca51a5166aad6e2b755fa958
URL: https://github.com/llvm/llvm-project/commit/2a9b86cc10c3883cca51a5166aad6e2b755fa958
DIFF: https://github.com/llvm/llvm-project/commit/2a9b86cc10c3883cca51a5166aad6e2b755fa958.diff
LOG: [SROA] Extend !tbaa.struct test coverage with multiple missing cases.
Add tests to cover missing cases for
https://github.com/llvm/llvm-project/pull/81289 and
https://github.com/llvm/llvm-project/pull/81313.
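
For reference, !tbaa.struct metadata on a memory-transfer intrinsic is a list of (offset, size, TBAA tag) triples describing the fields being copied; when SROA splits such a copy into per-field loads and stores, the matching fragment should be carried over to each new access. A minimal illustrative sketch, not taken from this commit (the function name and metadata numbering are made up for the example, mirroring the TBAA nodes already used in the test file):

define void @copy_two_floats(ptr %dst, ptr %src) {
  ; Copy a { float, float } pair; the !tbaa.struct operand describes two
  ; 4-byte float fields at offsets 0 and 4.
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 8, i1 false), !tbaa.struct !0
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)

!0 = !{i64 0, i64 4, !1, i64 4, i64 4, !1} ; (offset, size, tag) per field
!1 = !{!2, !2, i64 0}                      ; access tag for "float"
!2 = !{!"float", !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
!4 = !{!"Simple C++ TBAA"}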
Added:
Modified:
llvm/test/Transforms/SROA/tbaa-struct3.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/SROA/tbaa-struct3.ll b/llvm/test/Transforms/SROA/tbaa-struct3.ll
index 4910e0e07ae380..71a4a126710aa2 100644
--- a/llvm/test/Transforms/SROA/tbaa-struct3.ll
+++ b/llvm/test/Transforms/SROA/tbaa-struct3.ll
@@ -1,6 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -p sroa -S %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
define void @load_store_transfer_split_struct_tbaa_2_float(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @load_store_transfer_split_struct_tbaa_2_float(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
@@ -89,6 +92,441 @@ entry:
}
+declare <2 x float> @foo(ptr)
+
+define void @store_vector_part_first(ptr %y2, float %f) {
+; CHECK-LABEL: define void @store_vector_part_first(
+; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
+; CHECK-NEXT: [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
+; CHECK-NEXT: store <2 x float> [[V_1]], ptr [[Y2]], align 8, !tbaa.struct [[TBAA_STRUCT6:![0-9]+]]
+; CHECK-NEXT: [[X7_SROA_2_0_Y2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[Y2]], i64 8
+; CHECK-NEXT: store float [[F]], ptr [[X7_SROA_2_0_Y2_SROA_IDX]], align 8, !tbaa.struct [[TBAA_STRUCT5]]
+; CHECK-NEXT: ret void
+;
+ %x7 = alloca { float, float, float, float }
+ %v.1 = call <2 x float> @foo(ptr %y2)
+ store <2 x float> %v.1, ptr %x7
+ %gep = getelementptr i8, ptr %x7, i64 8
+ store float %f, ptr %gep
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %y2, ptr align 8 %x7, i64 12, i1 false), !tbaa.struct !7
+ ret void
+}
+
+define void @store_vector_part_second(ptr %y2, float %f) {
+; CHECK-LABEL: define void @store_vector_part_second(
+; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
+; CHECK-NEXT: [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
+; CHECK-NEXT: store float [[F]], ptr [[Y2]], align 8, !tbaa.struct [[TBAA_STRUCT9:![0-9]+]]
+; CHECK-NEXT: [[X7_SROA_2_0_Y2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[Y2]], i64 4
+; CHECK-NEXT: store <2 x float> [[V_1]], ptr [[X7_SROA_2_0_Y2_SROA_IDX]], align 4, !tbaa.struct [[TBAA_STRUCT10:![0-9]+]]
+; CHECK-NEXT: ret void
+;
+ %x7 = alloca { float, float, float, float }
+ %v.1 = call <2 x float> @foo(ptr %y2)
+ store float %f, ptr %x7
+ %gep = getelementptr i8, ptr %x7, i64 4
+ store <2 x float> %v.1, ptr %gep
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %y2, ptr align 8 %x7, i64 12, i1 false), !tbaa.struct !8
+ ret void
+}
+
+define void @store_vector_single(ptr %y2, float %f) {
+; CHECK-LABEL: define void @store_vector_single(
+; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
+; CHECK-NEXT: [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
+; CHECK-NEXT: store <2 x float> [[V_1]], ptr [[Y2]], align 4, !tbaa.struct [[TBAA_STRUCT11:![0-9]+]]
+; CHECK-NEXT: ret void
+;
+ %x7 = alloca { float, float }
+ %v.1 = call <2 x float> @foo(ptr %y2)
+ store <2 x float> %v.1, ptr %x7
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %y2, ptr align 4 %x7, i64 8, i1 false), !tbaa.struct !9
+ ret void
+}
+
+declare void @llvm.memset.p0.i8(ptr nocapture, i8, i32, i1) nounwind
+
+define void @memset(ptr %dst, ptr align 8 %src) {
+; CHECK-LABEL: define void @memset(
+; CHECK-SAME: ptr [[DST:%.*]], ptr align 8 [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca i16, align 2
+; CHECK-NEXT: [[A_SROA_4:%.*]] = alloca [10 x i8], align 1
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 7, i1 false)
+; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 7
+; CHECK-NEXT: [[A_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A_SROA_3_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i16 [[A_SROA_3_0_COPYLOAD]], ptr [[A_SROA_3]], align 2
+; CHECK-NEXT: [[A_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 1 [[A_SROA_4_0_SRC_SROA_IDX]], i32 10, i1 false)
+; CHECK-NEXT: store i16 1, ptr [[A_SROA_3]], align 2
+; CHECK-NEXT: [[A_SROA_0_1_A_1_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[A_SROA_0]], i64 1
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[A_SROA_0_1_A_1_SROA_IDX2]], i8 42, i32 6, i1 false), !tbaa.struct [[TBAA_STRUCT12:![0-9]+]]
+; CHECK-NEXT: store i16 10794, ptr [[A_SROA_3]], align 2, !tbaa.struct [[TBAA_STRUCT13:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 7, i1 true)
+; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 7
+; CHECK-NEXT: [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A_SROA_3]], align 2
+; CHECK-NEXT: store volatile i16 [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1]], ptr [[A_SROA_3_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_0_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 10, i1 true)
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = alloca [19 x i8]
+ call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 19, i1 false)
+
+ %a.1 = getelementptr i8, ptr %a, i64 1
+ %a.7 = getelementptr i8, ptr %a, i64 7
+ store i16 1, ptr %a.7
+ call void @llvm.memset.p0.i32(ptr %a.1, i8 42, i32 8, i1 false), !tbaa.struct !12
+
+ call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 19, i1 true)
+ ret void
+}
+
+define void @memset2(ptr %dst, ptr align 8 %src) {
+; CHECK-LABEL: define void @memset2(
+; CHECK-SAME: ptr [[DST:%.*]], ptr align 8 [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [209 x i8], align 1
+; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca i8, align 1
+; CHECK-NEXT: [[A_SROA_4:%.*]] = alloca [90 x i8], align 1
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 209, i1 false)
+; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 209
+; CHECK-NEXT: [[A_SROA_3_0_COPYLOAD:%.*]] = load i8, ptr [[A_SROA_3_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i8 [[A_SROA_3_0_COPYLOAD]], ptr [[A_SROA_3]], align 1
+; CHECK-NEXT: [[A_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 210
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 2 [[A_SROA_4_0_SRC_SROA_IDX]], i32 90, i1 false)
+; CHECK-NEXT: store i8 1, ptr [[A_SROA_3]], align 1
+; CHECK-NEXT: [[A_SROA_0_202_A_202_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[A_SROA_0]], i64 202
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[A_SROA_0_202_A_202_SROA_IDX2]], i8 42, i32 7, i1 false), !tbaa.struct [[TBAA_STRUCT14:![0-9]+]]
+; CHECK-NEXT: store i8 42, ptr [[A_SROA_3]], align 1, !tbaa.struct [[TBAA_STRUCT15:![0-9]+]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 209, i1 true)
+; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 209
+; CHECK-NEXT: [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1:%.*]] = load volatile i8, ptr [[A_SROA_3]], align 1
+; CHECK-NEXT: store volatile i8 [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1]], ptr [[A_SROA_3_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 210
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_0_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 90, i1 true)
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = alloca [300 x i8]
+
+ call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 300, i1 false)
+
+ %a.202 = getelementptr [300 x i8], ptr %a, i64 0, i64 202
+
+ %a.209 = getelementptr [300 x i8], ptr %a, i64 0, i64 209
+
+
+ store i8 1, ptr %a.209
+
+ call void @llvm.memset.p0.i32(ptr %a.202, i8 42, i32 8, i1 false), !tbaa.struct !15
+
+ call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 300, i1 true)
+
+ ret void
+}
+
+
+
+define void @slice_store_v2i8_1(ptr %dst, ptr %dst.2, ptr %src) {
+; CHECK-LABEL: define void @slice_store_v2i8_1(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [6 x i8], align 1
+; CHECK-NEXT: [[A_SROA_2_SROA_0:%.*]] = alloca <2 x i8>, align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 6, i1 false)
+; CHECK-NEXT: [[A_SROA_2_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 6
+; CHECK-NEXT: [[A_SROA_2_SROA_0_0_COPYLOAD:%.*]] = load <2 x i8>, ptr [[A_SROA_2_0_SRC_SROA_IDX]], align 2
+; CHECK-NEXT: store <2 x i8> [[A_SROA_2_SROA_0_0_COPYLOAD]], ptr [[A_SROA_2_SROA_0]], align 4
+; CHECK-NEXT: store <2 x i8> bitcast (<1 x i16> <i16 123> to <2 x i8>), ptr [[A_SROA_2_SROA_0]], align 4, !tbaa.struct [[TBAA_STRUCT16:![0-9]+]]
+; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_A_SROA_2_6_V_4:%.*]] = load <2 x i8>, ptr [[A_SROA_2_SROA_0]], align 4
+; CHECK-NEXT: store <2 x i8> [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_A_SROA_2_6_V_4]], ptr [[DST_2]], align 2
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 6, i1 true)
+; CHECK-NEXT: [[A_SROA_2_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 6
+; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_COPYLOAD1:%.*]] = load volatile <2 x i8>, ptr [[A_SROA_2_SROA_0]], align 4
+; CHECK-NEXT: store volatile <2 x i8> [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_COPYLOAD1]], ptr [[A_SROA_2_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = alloca [20 x i8]
+
+ call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 8, i1 false)
+ %a.6 = getelementptr inbounds i8, ptr %a, i64 6
+
+ store i32 123, ptr %a.6, !tbaa.struct !10
+
+ %v.4 = load <2 x i8>, ptr %a.6
+
+ store <2 x i8> %v.4, ptr %dst.2
+
+ call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr align 8 %a, i32 8, i1 true)
+ ret void
+}
+
+define void @slice_store_v2i8_2(ptr %dst, ptr %dst.2, ptr %src) {
+; CHECK-LABEL: define void @slice_store_v2i8_2(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0_SROA_1:%.*]] = alloca <2 x i8>, align 2
+; CHECK-NEXT: [[A_SROA_0_SROA_4:%.*]] = alloca i8, align 1
+; CHECK-NEXT: [[A_SROA_4:%.*]] = alloca [5 x i8], align 1
+; CHECK-NEXT: [[A_SROA_0_SROA_1_1_COPYLOAD:%.*]] = load <2 x i8>, ptr [[SRC]], align 8
+; CHECK-NEXT: store <2 x i8> [[A_SROA_0_SROA_1_1_COPYLOAD]], ptr [[A_SROA_0_SROA_1]], align 2
+; CHECK-NEXT: [[A_SROA_0_SROA_4_1_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
+; CHECK-NEXT: [[A_SROA_0_SROA_4_1_COPYLOAD:%.*]] = load i8, ptr [[A_SROA_0_SROA_4_1_SRC_SROA_IDX]], align 2
+; CHECK-NEXT: store i8 [[A_SROA_0_SROA_4_1_COPYLOAD]], ptr [[A_SROA_0_SROA_4]], align 1
+; CHECK-NEXT: [[A_SROA_4_1_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 1 [[A_SROA_4_1_SRC_SROA_IDX]], i32 5, i1 false)
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[A_SROA_0_SROA_1]], align 2, !tbaa.struct [[TBAA_STRUCT17:![0-9]+]]
+; CHECK-NEXT: store i8 0, ptr [[A_SROA_0_SROA_4]], align 1, !tbaa.struct [[TBAA_STRUCT18:![0-9]+]]
+; CHECK-NEXT: [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_A_SROA_0_1_V_4:%.*]] = load <2 x i8>, ptr [[A_SROA_0_SROA_1]], align 2
+; CHECK-NEXT: store <2 x i8> [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_A_SROA_0_1_V_4]], ptr [[DST_2]], align 2
+; CHECK-NEXT: [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_COPYLOAD3:%.*]] = load volatile <2 x i8>, ptr [[A_SROA_0_SROA_1]], align 2
+; CHECK-NEXT: store volatile <2 x i8> [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_COPYLOAD3]], ptr [[DST]], align 1
+; CHECK-NEXT: [[A_SROA_0_SROA_4_1_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
+; CHECK-NEXT: [[A_SROA_0_SROA_4_0_A_SROA_0_SROA_4_1_COPYLOAD4:%.*]] = load volatile i8, ptr [[A_SROA_0_SROA_4]], align 1
+; CHECK-NEXT: store volatile i8 [[A_SROA_0_SROA_4_0_A_SROA_0_SROA_4_1_COPYLOAD4]], ptr [[A_SROA_0_SROA_4_1_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A_SROA_4_1_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_1_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 5, i1 true)
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = alloca [20 x i8]
+
+ %a.1 = getelementptr inbounds i8, ptr %a, i64 1
+ call void @llvm.memcpy.p0.p0.i32(ptr %a.1, ptr align 8 %src, i32 8, i1 false)
+
+ store i32 123, ptr %a, !tbaa.struct !11
+
+ %v.4 = load <2 x i8>, ptr %a.1
+ store <2 x i8> %v.4, ptr %dst.2
+
+ call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr align 8 %a.1, i32 8, i1 true)
+ ret void
+}
+
+define double @tbaa_struct_load(ptr %src, ptr %dst) {
+; CHECK-LABEL: define double @tbaa_struct_load(
+; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
+; CHECK-NEXT: [[TMP_SROA_0:%.*]] = alloca double, align 8
+; CHECK-NEXT: [[TMP_SROA_3:%.*]] = alloca i64, align 8
+; CHECK-NEXT: [[TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[SRC]], align 8
+; CHECK-NEXT: store double [[TMP_SROA_0_0_COPYLOAD]], ptr [[TMP_SROA_0]], align 8
+; CHECK-NEXT: [[TMP_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
+; CHECK-NEXT: [[TMP_SROA_3_0_COPYLOAD:%.*]] = load i64, ptr [[TMP_SROA_3_0_SRC_SROA_IDX]], align 8
+; CHECK-NEXT: store i64 [[TMP_SROA_3_0_COPYLOAD]], ptr [[TMP_SROA_3]], align 8
+; CHECK-NEXT: [[TMP_SROA_0_0_TMP_SROA_0_0_LG:%.*]] = load double, ptr [[TMP_SROA_0]], align 8, !tbaa.struct [[TBAA_STRUCT10]]
+; CHECK-NEXT: [[TMP_SROA_0_0_TMP_SROA_0_0_COPYLOAD1:%.*]] = load volatile double, ptr [[TMP_SROA_0]], align 8
+; CHECK-NEXT: store volatile double [[TMP_SROA_0_0_TMP_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 8
+; CHECK-NEXT: [[TMP_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
+; CHECK-NEXT: [[TMP_SROA_3_0_TMP_SROA_3_0_COPYLOAD2:%.*]] = load volatile i64, ptr [[TMP_SROA_3]], align 8
+; CHECK-NEXT: store volatile i64 [[TMP_SROA_3_0_TMP_SROA_3_0_COPYLOAD2]], ptr [[TMP_SROA_3_0_DST_SROA_IDX]], align 8
+; CHECK-NEXT: ret double [[TMP_SROA_0_0_TMP_SROA_0_0_LG]]
+;
+ %tmp = alloca [16 x i8], align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 16, i1 false)
+ %lg = load double, ptr %tmp, align 8, !tbaa.struct !13
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 16, i1 true)
+ ret double %lg
+}
+
+define i32 @shorten_integer_store_single_field(ptr %dst, ptr %dst.2, ptr %src) {
+; CHECK-LABEL: define i32 @shorten_integer_store_single_field(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 123, ptr [[A_SROA_0]], align 4, !tbaa.struct [[TBAA_STRUCT0]]
+; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i32, ptr [[A_SROA_0]], align 4
+; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, ptr [[A_SROA_0]], align 4
+; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD]], ptr [[DST]], align 1
+; CHECK-NEXT: ret i32 [[A_SROA_0_0_A_SROA_0_0_L]]
+;
+entry:
+ %a = alloca [8 x i8], align 2
+ store i64 123, ptr %a, align 2, !tbaa.struct !0
+ %l = load i32, ptr %a
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
+ ret i32 %l
+}
+
+define i32 @shorten_integer_store_multiple_fields(ptr %dst, ptr %dst.2, ptr %src) {
+; CHECK-LABEL: define i32 @shorten_integer_store_multiple_fields(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 123, ptr [[A_SROA_0]], align 4, !tbaa.struct [[TBAA_STRUCT19:![0-9]+]]
+; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i32, ptr [[A_SROA_0]], align 4
+; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, ptr [[A_SROA_0]], align 4
+; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD]], ptr [[DST]], align 1
+; CHECK-NEXT: ret i32 [[A_SROA_0_0_A_SROA_0_0_L]]
+;
+entry:
+ %a = alloca [8 x i8], align 2
+ store i64 123, ptr %a, align 2, !tbaa.struct !14
+ %l = load i32, ptr %a
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
+ ret i32 %l
+}
+
+define <2 x i16> @shorten_vector_store_multiple_fields(ptr %dst, ptr %dst.2, ptr %src) {
+; CHECK-LABEL: define <2 x i16> @shorten_vector_store_multiple_fields(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca <2 x i32>, align 8
+; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A_SROA_0]], align 8, !tbaa.struct [[TBAA_STRUCT0]]
+; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load <2 x i16>, ptr [[A_SROA_0]], align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 8 [[A_SROA_0]], i32 4, i1 true)
+; CHECK-NEXT: ret <2 x i16> [[A_SROA_0_0_A_SROA_0_0_L]]
+;
+entry:
+ %a = alloca [8 x i8], align 2
+ store <2 x i32> <i32 1, i32 2>, ptr %a, align 2, !tbaa.struct !0
+ %l = load <2 x i16>, ptr %a
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
+ ret <2 x i16> %l
+}
+
+define <2 x i16> @shorten_vector_store_single_fields(ptr %dst, ptr %dst.2, ptr %src) {
+; CHECK-LABEL: define <2 x i16> @shorten_vector_store_single_fields(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca <2 x i32>, align 8
+; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A_SROA_0]], align 8, !tbaa.struct [[TBAA_STRUCT19]]
+; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load <2 x i16>, ptr [[A_SROA_0]], align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 8 [[A_SROA_0]], i32 4, i1 true)
+; CHECK-NEXT: ret <2 x i16> [[A_SROA_0_0_A_SROA_0_0_L]]
+;
+entry:
+ %a = alloca [8 x i8], align 8
+ store <2 x i32> <i32 1, i32 2>, ptr %a, align 8, !tbaa.struct !14
+ %l = load <2 x i16>, ptr %a
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
+ ret <2 x i16> %l
+}
+
+define i32 @split_load_with_tbaa_struct(i32 %x, ptr %src, ptr %dst) {
+; CHECK-LABEL: define i32 @split_load_with_tbaa_struct(
+; CHECK-SAME: i32 [[X:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A3_SROA_0:%.*]] = alloca i16, align 8
+; CHECK-NEXT: [[A3_SROA_3:%.*]] = alloca i16, align 2
+; CHECK-NEXT: [[A3_SROA_33:%.*]] = alloca float, align 4
+; CHECK-NEXT: [[A3_SROA_4:%.*]] = alloca i8, align 8
+; CHECK-NEXT: [[A3_SROA_5:%.*]] = alloca i8, align 1
+; CHECK-NEXT: [[A3_SROA_0_0_COPYLOAD:%.*]] = load i16, ptr [[SRC]], align 1
+; CHECK-NEXT: store i16 [[A3_SROA_0_0_COPYLOAD]], ptr [[A3_SROA_0]], align 8
+; CHECK-NEXT: [[A3_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
+; CHECK-NEXT: [[A3_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A3_SROA_3_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i16 [[A3_SROA_3_0_COPYLOAD]], ptr [[A3_SROA_3]], align 2
+; CHECK-NEXT: [[A3_SROA_33_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 4
+; CHECK-NEXT: [[A3_SROA_33_0_COPYLOAD:%.*]] = load float, ptr [[A3_SROA_33_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store float [[A3_SROA_33_0_COPYLOAD]], ptr [[A3_SROA_33]], align 4
+; CHECK-NEXT: [[A3_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
+; CHECK-NEXT: [[A3_SROA_4_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_4_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i8 [[A3_SROA_4_0_COPYLOAD]], ptr [[A3_SROA_4]], align 8
+; CHECK-NEXT: [[A3_SROA_5_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
+; CHECK-NEXT: [[A3_SROA_5_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_5_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i8 [[A3_SROA_5_0_COPYLOAD]], ptr [[A3_SROA_5]], align 1
+; CHECK-NEXT: [[A3_SROA_0_0_A3_SROA_0_0_LOAD4_FCA_0_LOAD:%.*]] = load i16, ptr [[A3_SROA_0]], align 8, !tbaa.struct [[TBAA_STRUCT20:![0-9]+]]
+; CHECK-NEXT: [[LOAD4_FCA_0_INSERT:%.*]] = insertvalue { i16, float, i8 } poison, i16 [[A3_SROA_0_0_A3_SROA_0_0_LOAD4_FCA_0_LOAD]], 0
+; CHECK-NEXT: [[A3_SROA_33_0_A3_SROA_33_4_LOAD4_FCA_1_LOAD:%.*]] = load float, ptr [[A3_SROA_33]], align 4, !tbaa.struct [[TBAA_STRUCT21:![0-9]+]]
+; CHECK-NEXT: [[LOAD4_FCA_1_INSERT:%.*]] = insertvalue { i16, float, i8 } [[LOAD4_FCA_0_INSERT]], float [[A3_SROA_33_0_A3_SROA_33_4_LOAD4_FCA_1_LOAD]], 1
+; CHECK-NEXT: [[A3_SROA_4_0_A3_SROA_4_8_LOAD4_FCA_2_LOAD:%.*]] = load i8, ptr [[A3_SROA_4]], align 8, !tbaa.struct [[TBAA_STRUCT15]]
+; CHECK-NEXT: [[LOAD4_FCA_2_INSERT:%.*]] = insertvalue { i16, float, i8 } [[LOAD4_FCA_1_INSERT]], i8 [[A3_SROA_4_0_A3_SROA_4_8_LOAD4_FCA_2_LOAD]], 2
+; CHECK-NEXT: [[UNWRAP2:%.*]] = extractvalue { i16, float, i8 } [[LOAD4_FCA_2_INSERT]], 1
+; CHECK-NEXT: [[VALCAST2:%.*]] = bitcast float [[UNWRAP2]] to i32
+; CHECK-NEXT: [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A3_SROA_0]], align 8
+; CHECK-NEXT: store volatile i16 [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 1
+; CHECK-NEXT: [[A3_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
+; CHECK-NEXT: [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2:%.*]] = load volatile i16, ptr [[A3_SROA_3]], align 2
+; CHECK-NEXT: store volatile i16 [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2]], ptr [[A3_SROA_3_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A3_SROA_33_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 4
+; CHECK-NEXT: [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4:%.*]] = load volatile float, ptr [[A3_SROA_33]], align 4
+; CHECK-NEXT: store volatile float [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4]], ptr [[A3_SROA_33_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A3_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
+; CHECK-NEXT: [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5:%.*]] = load volatile i8, ptr [[A3_SROA_4]], align 8
+; CHECK-NEXT: store volatile i8 [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5]], ptr [[A3_SROA_4_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A3_SROA_5_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
+; CHECK-NEXT: [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6:%.*]] = load volatile i8, ptr [[A3_SROA_5]], align 1
+; CHECK-NEXT: store volatile i8 [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6]], ptr [[A3_SROA_5_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: ret i32 [[VALCAST2]]
+;
+entry:
+ %a3 = alloca { float, float , float }
+
+ call void @llvm.memcpy.p0.p0.i64(ptr %a3, ptr %src, i64 10, i1 false)
+ %load4 = load { i16, float , i8}, ptr %a3, !tbaa.struct !16
+ %unwrap2 = extractvalue { i16, float, i8 } %load4 , 1
+ %valcast2 = bitcast float %unwrap2 to i32
+ call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a3, i64 10, i1 true)
+
+ ret i32 %valcast2
+}
+
+define i32 @split_store_with_tbaa_struct(i32 %x, ptr %src, ptr %dst) {
+; CHECK-LABEL: define i32 @split_store_with_tbaa_struct(
+; CHECK-SAME: i32 [[X:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A3_SROA_0:%.*]] = alloca i16, align 8
+; CHECK-NEXT: [[A3_SROA_3:%.*]] = alloca i16, align 2
+; CHECK-NEXT: [[A3_SROA_33:%.*]] = alloca float, align 4
+; CHECK-NEXT: [[A3_SROA_4:%.*]] = alloca i8, align 8
+; CHECK-NEXT: [[A3_SROA_5:%.*]] = alloca i8, align 1
+; CHECK-NEXT: [[A3_SROA_0_0_COPYLOAD:%.*]] = load i16, ptr [[SRC]], align 1
+; CHECK-NEXT: store i16 [[A3_SROA_0_0_COPYLOAD]], ptr [[A3_SROA_0]], align 8
+; CHECK-NEXT: [[A3_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
+; CHECK-NEXT: [[A3_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A3_SROA_3_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i16 [[A3_SROA_3_0_COPYLOAD]], ptr [[A3_SROA_3]], align 2
+; CHECK-NEXT: [[A3_SROA_33_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 4
+; CHECK-NEXT: [[A3_SROA_33_0_COPYLOAD:%.*]] = load float, ptr [[A3_SROA_33_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store float [[A3_SROA_33_0_COPYLOAD]], ptr [[A3_SROA_33]], align 4
+; CHECK-NEXT: [[A3_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
+; CHECK-NEXT: [[A3_SROA_4_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_4_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i8 [[A3_SROA_4_0_COPYLOAD]], ptr [[A3_SROA_4]], align 8
+; CHECK-NEXT: [[A3_SROA_5_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
+; CHECK-NEXT: [[A3_SROA_5_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_5_0_SRC_SROA_IDX]], align 1
+; CHECK-NEXT: store i8 [[A3_SROA_5_0_COPYLOAD]], ptr [[A3_SROA_5]], align 1
+; CHECK-NEXT: [[I_1:%.*]] = insertvalue { i16, float, i8 } poison, i16 10, 0
+; CHECK-NEXT: [[I_2:%.*]] = insertvalue { i16, float, i8 } [[I_1]], float 3.000000e+00, 1
+; CHECK-NEXT: [[I_3:%.*]] = insertvalue { i16, float, i8 } [[I_2]], i8 99, 2
+; CHECK-NEXT: [[I_3_FCA_0_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 0
+; CHECK-NEXT: store i16 [[I_3_FCA_0_EXTRACT]], ptr [[A3_SROA_0]], align 8, !tbaa.struct [[TBAA_STRUCT20]]
+; CHECK-NEXT: [[I_3_FCA_1_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 1
+; CHECK-NEXT: store float [[I_3_FCA_1_EXTRACT]], ptr [[A3_SROA_33]], align 4, !tbaa.struct [[TBAA_STRUCT21]]
+; CHECK-NEXT: [[I_3_FCA_2_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 2
+; CHECK-NEXT: store i8 [[I_3_FCA_2_EXTRACT]], ptr [[A3_SROA_4]], align 8, !tbaa.struct [[TBAA_STRUCT15]]
+; CHECK-NEXT: [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A3_SROA_0]], align 8
+; CHECK-NEXT: store volatile i16 [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 1
+; CHECK-NEXT: [[A3_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
+; CHECK-NEXT: [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2:%.*]] = load volatile i16, ptr [[A3_SROA_3]], align 2
+; CHECK-NEXT: store volatile i16 [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2]], ptr [[A3_SROA_3_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A3_SROA_33_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 4
+; CHECK-NEXT: [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4:%.*]] = load volatile float, ptr [[A3_SROA_33]], align 4
+; CHECK-NEXT: store volatile float [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4]], ptr [[A3_SROA_33_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A3_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
+; CHECK-NEXT: [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5:%.*]] = load volatile i8, ptr [[A3_SROA_4]], align 8
+; CHECK-NEXT: store volatile i8 [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5]], ptr [[A3_SROA_4_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: [[A3_SROA_5_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
+; CHECK-NEXT: [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6:%.*]] = load volatile i8, ptr [[A3_SROA_5]], align 1
+; CHECK-NEXT: store volatile i8 [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6]], ptr [[A3_SROA_5_0_DST_SROA_IDX]], align 1
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %a3 = alloca { float, float , float }
+
+ call void @llvm.memcpy.p0.p0.i64(ptr %a3, ptr %src, i64 10, i1 false)
+ %i.1 = insertvalue { i16, float, i8 } poison, i16 10, 0
+ %i.2 = insertvalue { i16, float, i8 } %i.1, float 3.0, 1
+ %i.3 = insertvalue { i16, float, i8 } %i.2, i8 99, 2
+ store { i16, float , i8} %i.3, ptr %a3, !tbaa.struct !16
+ call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a3, i64 10, i1 true)
+
+ ret i32 0
+}
+
+
; Function Attrs: mustprogress nocallback nofree nounwind willreturn memory(argmem: readwrite)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
@@ -97,6 +535,18 @@ declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias
!2 = !{!"float", !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
!4 = !{!"Simple C++ TBAA"}
+!5 = !{!"v2f32", !3, i64 0}
+!6 = !{!5, !5, i64 0}
+!7 = !{i64 0, i64 8, !6, i64 8, i64 4, !1}
+!8 = !{i64 0, i64 4, !1, i64 4, i64 8, !6}
+!9 = !{i64 0, i64 8, !6, i64 4, i64 8, !1}
+!10 = !{i64 0, i64 2, !1, i64 2, i64 2, !1}
+!11 = !{i64 0, i64 1, !1, i64 1, i64 3, !1}
+!12 = !{i64 0, i64 2, !1, i64 2, i64 6, !1}
+!13 = !{i64 0, i64 8, !6}
+!14 = !{i64 0, i64 4, !6}
+!15 = !{i64 0, i64 7, !6, i64 7, i64 1, !6}
+!16 = !{i64 0, i64 2, !6, i64 4, i64 4, !6, i64 8, i64 1, !6}
;.
; CHECK: [[TBAA_STRUCT0]] = !{i64 0, i64 4, [[META1:![0-9]+]], i64 4, i64 4, [[META1]]}
; CHECK: [[META1]] = !{[[META2:![0-9]+]], [[META2]], i64 0}
@@ -104,4 +554,20 @@ declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias
; CHECK: [[META3]] = !{!"omnipotent char", [[META4:![0-9]+]], i64 0}
; CHECK: [[META4]] = !{!"Simple C++ TBAA"}
; CHECK: [[TBAA_STRUCT5]] = !{i64 0, i64 4, [[META1]]}
+; CHECK: [[TBAA_STRUCT6]] = !{i64 0, i64 8, [[META7:![0-9]+]], i64 8, i64 4, [[META1]]}
+; CHECK: [[META7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+; CHECK: [[META8]] = !{!"v2f32", [[META3]], i64 0}
+; CHECK: [[TBAA_STRUCT9]] = !{i64 0, i64 4, [[META1]], i64 4, i64 8, [[META7]]}
+; CHECK: [[TBAA_STRUCT10]] = !{i64 0, i64 8, [[META7]]}
+; CHECK: [[TBAA_STRUCT11]] = !{i64 0, i64 8, [[META7]], i64 4, i64 8, [[META1]]}
+; CHECK: [[TBAA_STRUCT12]] = !{i64 0, i64 2, [[META1]], i64 2, i64 6, [[META1]]}
+; CHECK: [[TBAA_STRUCT13]] = !{i64 0, i64 2, [[META1]]}
+; CHECK: [[TBAA_STRUCT14]] = !{i64 0, i64 7, [[META7]], i64 7, i64 1, [[META7]]}
+; CHECK: [[TBAA_STRUCT15]] = !{i64 0, i64 1, [[META7]]}
+; CHECK: [[TBAA_STRUCT16]] = !{i64 0, i64 2, [[META1]], i64 2, i64 2, [[META1]]}
+; CHECK: [[TBAA_STRUCT17]] = !{i64 0, i64 3, [[META1]]}
+; CHECK: [[TBAA_STRUCT18]] = !{i64 0, i64 1, [[META1]]}
+; CHECK: [[TBAA_STRUCT19]] = !{i64 0, i64 4, [[META7]]}
+; CHECK: [[TBAA_STRUCT20]] = !{i64 0, i64 2, [[META7]], i64 4, i64 4, [[META7]], i64 8, i64 1, [[META7]]}
+; CHECK: [[TBAA_STRUCT21]] = !{i64 0, i64 4, [[META7]], i64 4, i64 1, [[META7]]}
;.