[llvm] 6f1c07b - [SLP][Test] Update pr47629.ll test. NFC
Anton Afanasyev via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 20 07:34:50 PST 2020
Author: Anton Afanasyev
Date: 2020-11-20T18:33:57+03:00
New Revision: 6f1c07b23a1ce012ed614f411776cea019d6e51a
URL: https://github.com/llvm/llvm-project/commit/6f1c07b23a1ce012ed614f411776cea019d6e51a
DIFF: https://github.com/llvm/llvm-project/commit/6f1c07b23a1ce012ed614f411776cea019d6e51a.diff
LOG: [SLP][Test] Update pr47629.ll test. NFC
Expand test for PR47629 to better demonstrate changes introduced by D90445.
Added:
Modified:
llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
index c33fa099ee28..e0537b64def6 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
@@ -5,174 +5,315 @@
; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512
; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux -mattr=+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
-define void @gather_load(i32* %0, i32* readonly %1) {
+define void @gather_load(i32* noalias nocapture %0, i32* noalias nocapture readonly %1) {
; CHECK-LABEL: @gather_load(
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1:%.*]], i64 1
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4, [[TBAA0:!tbaa !.*]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4, [[TBAA0]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, [[TBAA0]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP3]], align 4, [[TBAA0]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> undef, i32 [[TMP4]], i32 0
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP8]], i32 2
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP9]], i32 3
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[TMP13]], <i32 1, i32 2, i32 3, i32 4>
; CHECK-NEXT: [[TMP15:%.*]] = bitcast i32* [[TMP0:%.*]] to <4 x i32>*
-; CHECK-NEXT: store <4 x i32> [[TMP14]], <4 x i32>* [[TMP15]], align 4
+; CHECK-NEXT: store <4 x i32> [[TMP14]], <4 x i32>* [[TMP15]], align 4, [[TBAA0]]
; CHECK-NEXT: ret void
;
%3 = getelementptr inbounds i32, i32* %1, i64 1
- %4 = load i32, i32* %1, align 4
+ %4 = load i32, i32* %1, align 4, !tbaa !2
%5 = getelementptr inbounds i32, i32* %0, i64 1
%6 = getelementptr inbounds i32, i32* %1, i64 11
- %7 = load i32, i32* %6, align 4
+ %7 = load i32, i32* %6, align 4, !tbaa !2
%8 = getelementptr inbounds i32, i32* %0, i64 2
%9 = getelementptr inbounds i32, i32* %1, i64 4
- %10 = load i32, i32* %9, align 4
+ %10 = load i32, i32* %9, align 4, !tbaa !2
%11 = getelementptr inbounds i32, i32* %0, i64 3
- %12 = load i32, i32* %3, align 4
+ %12 = load i32, i32* %3, align 4, !tbaa !2
%13 = insertelement <4 x i32> undef, i32 %4, i32 0
%14 = insertelement <4 x i32> %13, i32 %7, i32 1
%15 = insertelement <4 x i32> %14, i32 %10, i32 2
%16 = insertelement <4 x i32> %15, i32 %12, i32 3
%17 = add nsw <4 x i32> %16, <i32 1, i32 2, i32 3, i32 4>
%18 = bitcast i32* %0 to <4 x i32>*
- store <4 x i32> %17, <4 x i32>* %18, align 4
+ store <4 x i32> %17, <4 x i32>* %18, align 4, !tbaa !2
ret void
}
-define void @gather_load_2(i32* %0, i32* readonly %1) {
-; CHECK-LABEL: @gather_load_2(
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1:%.*]], i64 1
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP4]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
-; CHECK-NEXT: store i32 [[TMP5]], i32* [[TMP0]], align 4
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 10
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP8]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
-; CHECK-NEXT: store i32 [[TMP9]], i32* [[TMP6]], align 4
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 3
-; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
-; CHECK-NEXT: [[TMP13:%.*]] = add nsw i32 [[TMP12]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
-; CHECK-NEXT: store i32 [[TMP13]], i32* [[TMP10]], align 4
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
-; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
-; CHECK-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP16]], 4
-; CHECK-NEXT: store i32 [[TMP17]], i32* [[TMP14]], align 4
-; CHECK-NEXT: ret void
+define void @gather_load_2(i32* noalias nocapture %0, i32* noalias nocapture readonly %1) {
+; SSE-LABEL: @gather_load_2(
+; SSE-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1:%.*]], i64 1
+; SSE-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, [[TBAA0:!tbaa !.*]]
+; SSE-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP4]], 1
+; SSE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
+; SSE-NEXT: store i32 [[TMP5]], i32* [[TMP0]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 10
+; SSE-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP8]], 2
+; SSE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
+; SSE-NEXT: store i32 [[TMP9]], i32* [[TMP6]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 3
+; SSE-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP13:%.*]] = add nsw i32 [[TMP12]], 3
+; SSE-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
+; SSE-NEXT: store i32 [[TMP13]], i32* [[TMP10]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
+; SSE-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP16]], 4
+; SSE-NEXT: store i32 [[TMP17]], i32* [[TMP14]], align 4, [[TBAA0]]
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @gather_load_2(
+; AVX-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1:%.*]], i64 1
+; AVX-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, [[TBAA0:!tbaa !.*]]
+; AVX-NEXT: [[TMP5:%.*]] = add nsw i32 [[TMP4]], 1
+; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
+; AVX-NEXT: store i32 [[TMP5]], i32* [[TMP0]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 10
+; AVX-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP8]], 2
+; AVX-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
+; AVX-NEXT: store i32 [[TMP9]], i32* [[TMP6]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 3
+; AVX-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP13:%.*]] = add nsw i32 [[TMP12]], 3
+; AVX-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
+; AVX-NEXT: store i32 [[TMP13]], i32* [[TMP10]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 5
+; AVX-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP16]], 4
+; AVX-NEXT: store i32 [[TMP17]], i32* [[TMP14]], align 4, [[TBAA0]]
+; AVX-NEXT: ret void
+;
+; AVX2-LABEL: @gather_load_2(
+; AVX2-NEXT: [[TMP3:%.*]] = insertelement <4 x i32*> undef, i32* [[TMP1:%.*]], i32 0
+; AVX2-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32*> [[TMP3]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; AVX2-NEXT: [[TMP5:%.*]] = getelementptr i32, <4 x i32*> [[TMP4]], <4 x i64> <i64 1, i64 10, i64 3, i64 5>
+; AVX2-NEXT: [[TMP6:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP5]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), [[TBAA0:!tbaa !.*]]
+; AVX2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], <i32 1, i32 2, i32 3, i32 4>
+; AVX2-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP0:%.*]] to <4 x i32>*
+; AVX2-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4, [[TBAA0]]
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @gather_load_2(
+; AVX512-NEXT: [[TMP3:%.*]] = insertelement <4 x i32*> undef, i32* [[TMP1:%.*]], i32 0
+; AVX512-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32*> [[TMP3]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; AVX512-NEXT: [[TMP5:%.*]] = getelementptr i32, <4 x i32*> [[TMP4]], <4 x i64> <i64 1, i64 10, i64 3, i64 5>
+; AVX512-NEXT: [[TMP6:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP5]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), [[TBAA0:!tbaa !.*]]
+; AVX512-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], <i32 1, i32 2, i32 3, i32 4>
+; AVX512-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP0:%.*]] to <4 x i32>*
+; AVX512-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4, [[TBAA0]]
+; AVX512-NEXT: ret void
;
%3 = getelementptr inbounds i32, i32* %1, i64 1
- %4 = load i32, i32* %3, align 4
+ %4 = load i32, i32* %3, align 4, !tbaa !2
%5 = add nsw i32 %4, 1
%6 = getelementptr inbounds i32, i32* %0, i64 1
- store i32 %5, i32* %0, align 4
+ store i32 %5, i32* %0, align 4, !tbaa !2
%7 = getelementptr inbounds i32, i32* %1, i64 10
- %8 = load i32, i32* %7, align 4
+ %8 = load i32, i32* %7, align 4, !tbaa !2
%9 = add nsw i32 %8, 2
%10 = getelementptr inbounds i32, i32* %0, i64 2
- store i32 %9, i32* %6, align 4
+ store i32 %9, i32* %6, align 4, !tbaa !2
%11 = getelementptr inbounds i32, i32* %1, i64 3
- %12 = load i32, i32* %11, align 4
+ %12 = load i32, i32* %11, align 4, !tbaa !2
%13 = add nsw i32 %12, 3
%14 = getelementptr inbounds i32, i32* %0, i64 3
- store i32 %13, i32* %10, align 4
+ store i32 %13, i32* %10, align 4, !tbaa !2
%15 = getelementptr inbounds i32, i32* %1, i64 5
- %16 = load i32, i32* %15, align 4
+ %16 = load i32, i32* %15, align 4, !tbaa !2
%17 = add nsw i32 %16, 4
- store i32 %17, i32* %14, align 4
+ store i32 %17, i32* %14, align 4, !tbaa !2
ret void
}
-define void @gather_load_3(i32* %0, i32* readonly %1) {
-; CHECK-LABEL: @gather_load_3(
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
-; CHECK-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 2
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
-; CHECK-NEXT: store i32 [[TMP8]], i32* [[TMP5]], align 4
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], 3
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
-; CHECK-NEXT: store i32 [[TMP12]], i32* [[TMP9]], align 4
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 15
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
-; CHECK-NEXT: [[TMP16:%.*]] = add i32 [[TMP15]], 4
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 4
-; CHECK-NEXT: store i32 [[TMP16]], i32* [[TMP13]], align 4
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 18
-; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
-; CHECK-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], 1
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
-; CHECK-NEXT: store i32 [[TMP20]], i32* [[TMP17]], align 4
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
-; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
-; CHECK-NEXT: [[TMP24:%.*]] = add i32 [[TMP23]], 2
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
-; CHECK-NEXT: store i32 [[TMP24]], i32* [[TMP21]], align 4
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
-; CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
-; CHECK-NEXT: [[TMP28:%.*]] = add i32 [[TMP27]], 3
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
-; CHECK-NEXT: store i32 [[TMP28]], i32* [[TMP25]], align 4
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
-; CHECK-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4
-; CHECK-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], 4
-; CHECK-NEXT: store i32 [[TMP32]], i32* [[TMP29]], align 4
-; CHECK-NEXT: ret void
+define void @gather_load_3(i32* noalias nocapture %0, i32* noalias nocapture readonly %1) {
+; SSE-LABEL: @gather_load_3(
+; SSE-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
+; SSE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
+; SSE-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
+; SSE-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 2
+; SSE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
+; SSE-NEXT: store i32 [[TMP8]], i32* [[TMP5]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4
+; SSE-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], 3
+; SSE-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
+; SSE-NEXT: store i32 [[TMP12]], i32* [[TMP9]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 15
+; SSE-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP16:%.*]] = add i32 [[TMP15]], 4
+; SSE-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 4
+; SSE-NEXT: store i32 [[TMP16]], i32* [[TMP13]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 18
+; SSE-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], 1
+; SSE-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
+; SSE-NEXT: store i32 [[TMP20]], i32* [[TMP17]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
+; SSE-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP24:%.*]] = add i32 [[TMP23]], 2
+; SSE-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
+; SSE-NEXT: store i32 [[TMP24]], i32* [[TMP21]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
+; SSE-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP28:%.*]] = add i32 [[TMP27]], 3
+; SSE-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
+; SSE-NEXT: store i32 [[TMP28]], i32* [[TMP25]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
+; SSE-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], 4
+; SSE-NEXT: store i32 [[TMP32]], i32* [[TMP29]], align 4, [[TBAA0]]
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @gather_load_3(
+; AVX-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
+; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
+; AVX-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 11
+; AVX-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], 2
+; AVX-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 2
+; AVX-NEXT: store i32 [[TMP8]], i32* [[TMP5]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 4
+; AVX-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], 3
+; AVX-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 3
+; AVX-NEXT: store i32 [[TMP12]], i32* [[TMP9]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 15
+; AVX-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP16:%.*]] = add i32 [[TMP15]], 4
+; AVX-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 4
+; AVX-NEXT: store i32 [[TMP16]], i32* [[TMP13]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 18
+; AVX-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], 1
+; AVX-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
+; AVX-NEXT: store i32 [[TMP20]], i32* [[TMP17]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
+; AVX-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP24:%.*]] = add i32 [[TMP23]], 2
+; AVX-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
+; AVX-NEXT: store i32 [[TMP24]], i32* [[TMP21]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
+; AVX-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP28:%.*]] = add i32 [[TMP27]], 3
+; AVX-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
+; AVX-NEXT: store i32 [[TMP28]], i32* [[TMP25]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
+; AVX-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP30]], align 4, [[TBAA0]]
+; AVX-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], 4
+; AVX-NEXT: store i32 [[TMP32]], i32* [[TMP29]], align 4, [[TBAA0]]
+; AVX-NEXT: ret void
+;
+; AVX2-LABEL: @gather_load_3(
+; AVX2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
+; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
+; AVX2-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP6:%.*]] = insertelement <4 x i32*> undef, i32* [[TMP1]], i32 0
+; AVX2-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; AVX2-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64> <i64 11, i64 4, i64 15, i64 18>
+; AVX2-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP8]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP9]], <i32 2, i32 3, i32 4, i32 1>
+; AVX2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
+; AVX2-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
+; AVX2-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
+; AVX2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 2
+; AVX2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
+; AVX2-NEXT: store i32 [[TMP15]], i32* [[TMP11]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
+; AVX2-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], 3
+; AVX2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
+; AVX2-NEXT: store i32 [[TMP19]], i32* [[TMP16]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
+; AVX2-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP23:%.*]] = add i32 [[TMP22]], 4
+; AVX2-NEXT: store i32 [[TMP23]], i32* [[TMP20]], align 4, [[TBAA0]]
+; AVX2-NEXT: ret void
;
- %3 = load i32, i32* %1, align 4
+; AVX512-LABEL: @gather_load_3(
+; AVX512-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1:%.*]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 1
+; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP0:%.*]], i64 1
+; AVX512-NEXT: store i32 [[TMP4]], i32* [[TMP0]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP6:%.*]] = insertelement <4 x i32*> undef, i32* [[TMP1]], i32 0
+; AVX512-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32*> [[TMP6]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; AVX512-NEXT: [[TMP8:%.*]] = getelementptr i32, <4 x i32*> [[TMP7]], <4 x i64> <i64 11, i64 4, i64 15, i64 18>
+; AVX512-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP8]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), [[TBAA0]]
+; AVX512-NEXT: [[TMP10:%.*]] = add <4 x i32> [[TMP9]], <i32 2, i32 3, i32 4, i32 1>
+; AVX512-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 5
+; AVX512-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
+; AVX512-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 9
+; AVX512-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], 2
+; AVX512-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 6
+; AVX512-NEXT: store i32 [[TMP15]], i32* [[TMP11]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 6
+; AVX512-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP19:%.*]] = add i32 [[TMP18]], 3
+; AVX512-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 7
+; AVX512-NEXT: store i32 [[TMP19]], i32* [[TMP16]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 21
+; AVX512-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP23:%.*]] = add i32 [[TMP22]], 4
+; AVX512-NEXT: store i32 [[TMP23]], i32* [[TMP20]], align 4, [[TBAA0]]
+; AVX512-NEXT: ret void
+;
+ %3 = load i32, i32* %1, align 4, !tbaa !2
%4 = add i32 %3, 1
%5 = getelementptr inbounds i32, i32* %0, i64 1
- store i32 %4, i32* %0, align 4
+ store i32 %4, i32* %0, align 4, !tbaa !2
%6 = getelementptr inbounds i32, i32* %1, i64 11
- %7 = load i32, i32* %6, align 4
+ %7 = load i32, i32* %6, align 4, !tbaa !2
%8 = add i32 %7, 2
%9 = getelementptr inbounds i32, i32* %0, i64 2
- store i32 %8, i32* %5, align 4
+ store i32 %8, i32* %5, align 4, !tbaa !2
%10 = getelementptr inbounds i32, i32* %1, i64 4
- %11 = load i32, i32* %10, align 4
+ %11 = load i32, i32* %10, align 4, !tbaa !2
%12 = add i32 %11, 3
%13 = getelementptr inbounds i32, i32* %0, i64 3
- store i32 %12, i32* %9, align 4
+ store i32 %12, i32* %9, align 4, !tbaa !2
%14 = getelementptr inbounds i32, i32* %1, i64 15
- %15 = load i32, i32* %14, align 4
+ %15 = load i32, i32* %14, align 4, !tbaa !2
%16 = add i32 %15, 4
%17 = getelementptr inbounds i32, i32* %0, i64 4
- store i32 %16, i32* %13, align 4
+ store i32 %16, i32* %13, align 4, !tbaa !2
%18 = getelementptr inbounds i32, i32* %1, i64 18
- %19 = load i32, i32* %18, align 4
+ %19 = load i32, i32* %18, align 4, !tbaa !2
%20 = add i32 %19, 1
%21 = getelementptr inbounds i32, i32* %0, i64 5
- store i32 %20, i32* %17, align 4
+ store i32 %20, i32* %17, align 4, !tbaa !2
%22 = getelementptr inbounds i32, i32* %1, i64 9
- %23 = load i32, i32* %22, align 4
+ %23 = load i32, i32* %22, align 4, !tbaa !2
%24 = add i32 %23, 2
%25 = getelementptr inbounds i32, i32* %0, i64 6
- store i32 %24, i32* %21, align 4
+ store i32 %24, i32* %21, align 4, !tbaa !2
%26 = getelementptr inbounds i32, i32* %1, i64 6
- %27 = load i32, i32* %26, align 4
+ %27 = load i32, i32* %26, align 4, !tbaa !2
%28 = add i32 %27, 3
%29 = getelementptr inbounds i32, i32* %0, i64 7
- store i32 %28, i32* %25, align 4
+ store i32 %28, i32* %25, align 4, !tbaa !2
%30 = getelementptr inbounds i32, i32* %1, i64 21
- %31 = load i32, i32* %30, align 4
+ %31 = load i32, i32* %30, align 4, !tbaa !2
%32 = add i32 %31, 4
- store i32 %32, i32* %29, align 4
+ store i32 %32, i32* %29, align 4, !tbaa !2
ret void
}
-define void @gather_load_4(i32* %t0, i32* readonly %t1) {
+define void @gather_load_4(i32* noalias nocapture %t0, i32* noalias nocapture readonly %t1) {
; SSE-LABEL: @gather_load_4(
; SSE-NEXT: [[T5:%.*]] = getelementptr inbounds i32, i32* [[T0:%.*]], i64 1
; SSE-NEXT: [[T6:%.*]] = getelementptr inbounds i32, i32* [[T1:%.*]], i64 11
@@ -188,14 +329,14 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
; SSE-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
; SSE-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
; SSE-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
-; SSE-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4
-; SSE-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4
-; SSE-NEXT: [[T11:%.*]] = load i32, i32* [[T10]], align 4
-; SSE-NEXT: [[T15:%.*]] = load i32, i32* [[T14]], align 4
-; SSE-NEXT: [[T19:%.*]] = load i32, i32* [[T18]], align 4
-; SSE-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4
-; SSE-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4
-; SSE-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4
+; SSE-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T11:%.*]] = load i32, i32* [[T10]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T15:%.*]] = load i32, i32* [[T14]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T19:%.*]] = load i32, i32* [[T18]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
+; SSE-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
; SSE-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
; SSE-NEXT: [[T8:%.*]] = add i32 [[T7]], 2
; SSE-NEXT: [[T12:%.*]] = add i32 [[T11]], 3
@@ -204,14 +345,14 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
; SSE-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
; SSE-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
; SSE-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; SSE-NEXT: store i32 [[T4]], i32* [[T0]], align 4
-; SSE-NEXT: store i32 [[T8]], i32* [[T5]], align 4
-; SSE-NEXT: store i32 [[T12]], i32* [[T9]], align 4
-; SSE-NEXT: store i32 [[T16]], i32* [[T13]], align 4
-; SSE-NEXT: store i32 [[T20]], i32* [[T17]], align 4
-; SSE-NEXT: store i32 [[T24]], i32* [[T21]], align 4
-; SSE-NEXT: store i32 [[T28]], i32* [[T25]], align 4
-; SSE-NEXT: store i32 [[T32]], i32* [[T29]], align 4
+; SSE-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T8]], i32* [[T5]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T12]], i32* [[T9]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T16]], i32* [[T13]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T20]], i32* [[T17]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
+; SSE-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
; SSE-NEXT: ret void
;
; AVX-LABEL: @gather_load_4(
@@ -229,14 +370,14 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
; AVX-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
; AVX-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
; AVX-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
-; AVX-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4
-; AVX-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4
-; AVX-NEXT: [[T11:%.*]] = load i32, i32* [[T10]], align 4
-; AVX-NEXT: [[T15:%.*]] = load i32, i32* [[T14]], align 4
-; AVX-NEXT: [[T19:%.*]] = load i32, i32* [[T18]], align 4
-; AVX-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4
-; AVX-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4
-; AVX-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4
+; AVX-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T7:%.*]] = load i32, i32* [[T6]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T11:%.*]] = load i32, i32* [[T10]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T15:%.*]] = load i32, i32* [[T14]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T19:%.*]] = load i32, i32* [[T18]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
+; AVX-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
; AVX-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
; AVX-NEXT: [[T8:%.*]] = add i32 [[T7]], 2
; AVX-NEXT: [[T12:%.*]] = add i32 [[T11]], 3
@@ -245,14 +386,14 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
; AVX-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
; AVX-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
; AVX-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX-NEXT: store i32 [[T4]], i32* [[T0]], align 4
-; AVX-NEXT: store i32 [[T8]], i32* [[T5]], align 4
-; AVX-NEXT: store i32 [[T12]], i32* [[T9]], align 4
-; AVX-NEXT: store i32 [[T16]], i32* [[T13]], align 4
-; AVX-NEXT: store i32 [[T20]], i32* [[T17]], align 4
-; AVX-NEXT: store i32 [[T24]], i32* [[T21]], align 4
-; AVX-NEXT: store i32 [[T28]], i32* [[T25]], align 4
-; AVX-NEXT: store i32 [[T32]], i32* [[T29]], align 4
+; AVX-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T8]], i32* [[T5]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T12]], i32* [[T9]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T16]], i32* [[T13]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T20]], i32* [[T17]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
+; AVX-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
; AVX-NEXT: ret void
;
; AVX2-LABEL: @gather_load_4(
@@ -266,22 +407,22 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
; AVX2-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
; AVX2-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
; AVX2-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
-; AVX2-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4
-; AVX2-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-; AVX2-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4
-; AVX2-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4
-; AVX2-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4
+; AVX2-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), [[TBAA0]]
+; AVX2-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
+; AVX2-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
; AVX2-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
; AVX2-NEXT: [[TMP5:%.*]] = add <4 x i32> [[TMP4]], <i32 2, i32 3, i32 4, i32 1>
; AVX2-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
; AVX2-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
; AVX2-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX2-NEXT: store i32 [[T4]], i32* [[T0]], align 4
+; AVX2-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
; AVX2-NEXT: [[TMP6:%.*]] = bitcast i32* [[T5]] to <4 x i32>*
-; AVX2-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
-; AVX2-NEXT: store i32 [[T24]], i32* [[T21]], align 4
-; AVX2-NEXT: store i32 [[T28]], i32* [[T25]], align 4
-; AVX2-NEXT: store i32 [[T32]], i32* [[T29]], align 4
+; AVX2-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4, [[TBAA0]]
+; AVX2-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
+; AVX2-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
+; AVX2-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
; AVX2-NEXT: ret void
;
; AVX512-LABEL: @gather_load_4(
@@ -295,22 +436,22 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
; AVX512-NEXT: [[T26:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 6
; AVX512-NEXT: [[T29:%.*]] = getelementptr inbounds i32, i32* [[T0]], i64 7
; AVX512-NEXT: [[T30:%.*]] = getelementptr inbounds i32, i32* [[T1]], i64 21
-; AVX512-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4
-; AVX512-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-; AVX512-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4
-; AVX512-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4
-; AVX512-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4
+; AVX512-NEXT: [[T3:%.*]] = load i32, i32* [[T1]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP3]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef), [[TBAA0]]
+; AVX512-NEXT: [[T23:%.*]] = load i32, i32* [[T22]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[T27:%.*]] = load i32, i32* [[T26]], align 4, [[TBAA0]]
+; AVX512-NEXT: [[T31:%.*]] = load i32, i32* [[T30]], align 4, [[TBAA0]]
; AVX512-NEXT: [[T4:%.*]] = add i32 [[T3]], 1
; AVX512-NEXT: [[TMP5:%.*]] = add <4 x i32> [[TMP4]], <i32 2, i32 3, i32 4, i32 1>
; AVX512-NEXT: [[T24:%.*]] = add i32 [[T23]], 2
; AVX512-NEXT: [[T28:%.*]] = add i32 [[T27]], 3
; AVX512-NEXT: [[T32:%.*]] = add i32 [[T31]], 4
-; AVX512-NEXT: store i32 [[T4]], i32* [[T0]], align 4
+; AVX512-NEXT: store i32 [[T4]], i32* [[T0]], align 4, [[TBAA0]]
; AVX512-NEXT: [[TMP6:%.*]] = bitcast i32* [[T5]] to <4 x i32>*
-; AVX512-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
-; AVX512-NEXT: store i32 [[T24]], i32* [[T21]], align 4
-; AVX512-NEXT: store i32 [[T28]], i32* [[T25]], align 4
-; AVX512-NEXT: store i32 [[T32]], i32* [[T29]], align 4
+; AVX512-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4, [[TBAA0]]
+; AVX512-NEXT: store i32 [[T24]], i32* [[T21]], align 4, [[TBAA0]]
+; AVX512-NEXT: store i32 [[T28]], i32* [[T25]], align 4, [[TBAA0]]
+; AVX512-NEXT: store i32 [[T32]], i32* [[T29]], align 4, [[TBAA0]]
; AVX512-NEXT: ret void
;
%t5 = getelementptr inbounds i32, i32* %t0, i64 1
@@ -328,14 +469,14 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
%t29 = getelementptr inbounds i32, i32* %t0, i64 7
%t30 = getelementptr inbounds i32, i32* %t1, i64 21
- %t3 = load i32, i32* %t1, align 4
- %t7 = load i32, i32* %t6, align 4
- %t11 = load i32, i32* %t10, align 4
- %t15 = load i32, i32* %t14, align 4
- %t19 = load i32, i32* %t18, align 4
- %t23 = load i32, i32* %t22, align 4
- %t27 = load i32, i32* %t26, align 4
- %t31 = load i32, i32* %t30, align 4
+ %t3 = load i32, i32* %t1, align 4, !tbaa !2
+ %t7 = load i32, i32* %t6, align 4, !tbaa !2
+ %t11 = load i32, i32* %t10, align 4, !tbaa !2
+ %t15 = load i32, i32* %t14, align 4, !tbaa !2
+ %t19 = load i32, i32* %t18, align 4, !tbaa !2
+ %t23 = load i32, i32* %t22, align 4, !tbaa !2
+ %t27 = load i32, i32* %t26, align 4, !tbaa !2
+ %t31 = load i32, i32* %t30, align 4, !tbaa !2
%t4 = add i32 %t3, 1
%t8 = add i32 %t7, 2
@@ -346,14 +487,178 @@ define void @gather_load_4(i32* %t0, i32* readonly %t1) {
%t28 = add i32 %t27, 3
%t32 = add i32 %t31, 4
- store i32 %t4, i32* %t0, align 4
- store i32 %t8, i32* %t5, align 4
- store i32 %t12, i32* %t9, align 4
- store i32 %t16, i32* %t13, align 4
- store i32 %t20, i32* %t17, align 4
- store i32 %t24, i32* %t21, align 4
- store i32 %t28, i32* %t25, align 4
- store i32 %t32, i32* %t29, align 4
+ store i32 %t4, i32* %t0, align 4, !tbaa !2
+ store i32 %t8, i32* %t5, align 4, !tbaa !2
+ store i32 %t12, i32* %t9, align 4, !tbaa !2
+ store i32 %t16, i32* %t13, align 4, !tbaa !2
+ store i32 %t20, i32* %t17, align 4, !tbaa !2
+ store i32 %t24, i32* %t21, align 4, !tbaa !2
+ store i32 %t28, i32* %t25, align 4, !tbaa !2
+ store i32 %t32, i32* %t29, align 4, !tbaa !2
ret void
}
+
+
+define void @gather_load_div(float* noalias nocapture %0, float* noalias nocapture readonly %1) {
+; SSE-LABEL: @gather_load_div(
+; SSE-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, float* [[TMP1:%.*]], i64 10
+; SSE-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 3
+; SSE-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 14
+; SSE-NEXT: [[TMP6:%.*]] = insertelement <4 x float*> undef, float* [[TMP1]], i32 0
+; SSE-NEXT: [[TMP7:%.*]] = insertelement <4 x float*> [[TMP6]], float* [[TMP3]], i32 1
+; SSE-NEXT: [[TMP8:%.*]] = insertelement <4 x float*> [[TMP7]], float* [[TMP4]], i32 2
+; SSE-NEXT: [[TMP9:%.*]] = insertelement <4 x float*> [[TMP8]], float* [[TMP5]], i32 3
+; SSE-NEXT: [[TMP10:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> [[TMP9]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef), [[TBAA0]]
+; SSE-NEXT: [[TMP11:%.*]] = shufflevector <4 x float*> [[TMP6]], <4 x float*> undef, <4 x i32> zeroinitializer
+; SSE-NEXT: [[TMP12:%.*]] = getelementptr float, <4 x float*> [[TMP11]], <4 x i64> <i64 4, i64 13, i64 11, i64 44>
+; SSE-NEXT: [[TMP13:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> [[TMP12]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef), [[TBAA0]]
+; SSE-NEXT: [[TMP14:%.*]] = fdiv <4 x float> [[TMP10]], [[TMP13]]
+; SSE-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, float* [[TMP0:%.*]], i64 4
+; SSE-NEXT: [[TMP16:%.*]] = bitcast float* [[TMP0]] to <4 x float>*
+; SSE-NEXT: store <4 x float> [[TMP14]], <4 x float>* [[TMP16]], align 4, [[TBAA0]]
+; SSE-NEXT: [[TMP17:%.*]] = getelementptr float, <4 x float*> [[TMP11]], <4 x i64> <i64 17, i64 8, i64 5, i64 20>
+; SSE-NEXT: [[TMP18:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> [[TMP17]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef), [[TBAA0]]
+; SSE-NEXT: [[TMP19:%.*]] = getelementptr float, <4 x float*> [[TMP11]], <4 x i64> <i64 33, i64 30, i64 27, i64 23>
+; SSE-NEXT: [[TMP20:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> [[TMP19]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef), [[TBAA0]]
+; SSE-NEXT: [[TMP21:%.*]] = fdiv <4 x float> [[TMP18]], [[TMP20]]
+; SSE-NEXT: [[TMP22:%.*]] = bitcast float* [[TMP15]] to <4 x float>*
+; SSE-NEXT: store <4 x float> [[TMP21]], <4 x float>* [[TMP22]], align 4, [[TBAA0]]
+; SSE-NEXT: ret void
+;
+; AVX-LABEL: @gather_load_div(
+; AVX-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, float* [[TMP1:%.*]], i64 10
+; AVX-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 3
+; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 14
+; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 17
+; AVX-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 8
+; AVX-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 5
+; AVX-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 20
+; AVX-NEXT: [[TMP10:%.*]] = insertelement <8 x float*> undef, float* [[TMP1]], i32 0
+; AVX-NEXT: [[TMP11:%.*]] = insertelement <8 x float*> [[TMP10]], float* [[TMP3]], i32 1
+; AVX-NEXT: [[TMP12:%.*]] = insertelement <8 x float*> [[TMP11]], float* [[TMP4]], i32 2
+; AVX-NEXT: [[TMP13:%.*]] = insertelement <8 x float*> [[TMP12]], float* [[TMP5]], i32 3
+; AVX-NEXT: [[TMP14:%.*]] = insertelement <8 x float*> [[TMP13]], float* [[TMP6]], i32 4
+; AVX-NEXT: [[TMP15:%.*]] = insertelement <8 x float*> [[TMP14]], float* [[TMP7]], i32 5
+; AVX-NEXT: [[TMP16:%.*]] = insertelement <8 x float*> [[TMP15]], float* [[TMP8]], i32 6
+; AVX-NEXT: [[TMP17:%.*]] = insertelement <8 x float*> [[TMP16]], float* [[TMP9]], i32 7
+; AVX-NEXT: [[TMP18:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> [[TMP17]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef), [[TBAA0]]
+; AVX-NEXT: [[TMP19:%.*]] = shufflevector <8 x float*> [[TMP10]], <8 x float*> undef, <8 x i32> zeroinitializer
+; AVX-NEXT: [[TMP20:%.*]] = getelementptr float, <8 x float*> [[TMP19]], <8 x i64> <i64 4, i64 13, i64 11, i64 44, i64 33, i64 30, i64 27, i64 23>
+; AVX-NEXT: [[TMP21:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> [[TMP20]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef), [[TBAA0]]
+; AVX-NEXT: [[TMP22:%.*]] = fdiv <8 x float> [[TMP18]], [[TMP21]]
+; AVX-NEXT: [[TMP23:%.*]] = bitcast float* [[TMP0:%.*]] to <8 x float>*
+; AVX-NEXT: store <8 x float> [[TMP22]], <8 x float>* [[TMP23]], align 4, [[TBAA0]]
+; AVX-NEXT: ret void
+;
+; AVX2-LABEL: @gather_load_div(
+; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, float* [[TMP1:%.*]], i64 10
+; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 3
+; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 14
+; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 17
+; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 8
+; AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 5
+; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 20
+; AVX2-NEXT: [[TMP10:%.*]] = insertelement <8 x float*> undef, float* [[TMP1]], i32 0
+; AVX2-NEXT: [[TMP11:%.*]] = insertelement <8 x float*> [[TMP10]], float* [[TMP3]], i32 1
+; AVX2-NEXT: [[TMP12:%.*]] = insertelement <8 x float*> [[TMP11]], float* [[TMP4]], i32 2
+; AVX2-NEXT: [[TMP13:%.*]] = insertelement <8 x float*> [[TMP12]], float* [[TMP5]], i32 3
+; AVX2-NEXT: [[TMP14:%.*]] = insertelement <8 x float*> [[TMP13]], float* [[TMP6]], i32 4
+; AVX2-NEXT: [[TMP15:%.*]] = insertelement <8 x float*> [[TMP14]], float* [[TMP7]], i32 5
+; AVX2-NEXT: [[TMP16:%.*]] = insertelement <8 x float*> [[TMP15]], float* [[TMP8]], i32 6
+; AVX2-NEXT: [[TMP17:%.*]] = insertelement <8 x float*> [[TMP16]], float* [[TMP9]], i32 7
+; AVX2-NEXT: [[TMP18:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> [[TMP17]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP19:%.*]] = shufflevector <8 x float*> [[TMP10]], <8 x float*> undef, <8 x i32> zeroinitializer
+; AVX2-NEXT: [[TMP20:%.*]] = getelementptr float, <8 x float*> [[TMP19]], <8 x i64> <i64 4, i64 13, i64 11, i64 44, i64 33, i64 30, i64 27, i64 23>
+; AVX2-NEXT: [[TMP21:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> [[TMP20]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef), [[TBAA0]]
+; AVX2-NEXT: [[TMP22:%.*]] = fdiv <8 x float> [[TMP18]], [[TMP21]]
+; AVX2-NEXT: [[TMP23:%.*]] = bitcast float* [[TMP0:%.*]] to <8 x float>*
+; AVX2-NEXT: store <8 x float> [[TMP22]], <8 x float>* [[TMP23]], align 4, [[TBAA0]]
+; AVX2-NEXT: ret void
+;
+; AVX512-LABEL: @gather_load_div(
+; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, float* [[TMP1:%.*]], i64 10
+; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 3
+; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 14
+; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 17
+; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 8
+; AVX512-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 5
+; AVX512-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 20
+; AVX512-NEXT: [[TMP10:%.*]] = insertelement <8 x float*> undef, float* [[TMP1]], i32 0
+; AVX512-NEXT: [[TMP11:%.*]] = insertelement <8 x float*> [[TMP10]], float* [[TMP3]], i32 1
+; AVX512-NEXT: [[TMP12:%.*]] = insertelement <8 x float*> [[TMP11]], float* [[TMP4]], i32 2
+; AVX512-NEXT: [[TMP13:%.*]] = insertelement <8 x float*> [[TMP12]], float* [[TMP5]], i32 3
+; AVX512-NEXT: [[TMP14:%.*]] = insertelement <8 x float*> [[TMP13]], float* [[TMP6]], i32 4
+; AVX512-NEXT: [[TMP15:%.*]] = insertelement <8 x float*> [[TMP14]], float* [[TMP7]], i32 5
+; AVX512-NEXT: [[TMP16:%.*]] = insertelement <8 x float*> [[TMP15]], float* [[TMP8]], i32 6
+; AVX512-NEXT: [[TMP17:%.*]] = insertelement <8 x float*> [[TMP16]], float* [[TMP9]], i32 7
+; AVX512-NEXT: [[TMP18:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> [[TMP17]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef), [[TBAA0]]
+; AVX512-NEXT: [[TMP19:%.*]] = shufflevector <8 x float*> [[TMP10]], <8 x float*> undef, <8 x i32> zeroinitializer
+; AVX512-NEXT: [[TMP20:%.*]] = getelementptr float, <8 x float*> [[TMP19]], <8 x i64> <i64 4, i64 13, i64 11, i64 44, i64 33, i64 30, i64 27, i64 23>
+; AVX512-NEXT: [[TMP21:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> [[TMP20]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef), [[TBAA0]]
+; AVX512-NEXT: [[TMP22:%.*]] = fdiv <8 x float> [[TMP18]], [[TMP21]]
+; AVX512-NEXT: [[TMP23:%.*]] = bitcast float* [[TMP0:%.*]] to <8 x float>*
+; AVX512-NEXT: store <8 x float> [[TMP22]], <8 x float>* [[TMP23]], align 4, [[TBAA0]]
+; AVX512-NEXT: ret void
+;
+ %3 = load float, float* %1, align 4, !tbaa !2
+ %4 = getelementptr inbounds float, float* %1, i64 4
+ %5 = load float, float* %4, align 4, !tbaa !2
+ %6 = fdiv float %3, %5
+ %7 = getelementptr inbounds float, float* %0, i64 1
+ store float %6, float* %0, align 4, !tbaa !2
+ %8 = getelementptr inbounds float, float* %1, i64 10
+ %9 = load float, float* %8, align 4, !tbaa !2
+ %10 = getelementptr inbounds float, float* %1, i64 13
+ %11 = load float, float* %10, align 4, !tbaa !2
+ %12 = fdiv float %9, %11
+ %13 = getelementptr inbounds float, float* %0, i64 2
+ store float %12, float* %7, align 4, !tbaa !2
+ %14 = getelementptr inbounds float, float* %1, i64 3
+ %15 = load float, float* %14, align 4, !tbaa !2
+ %16 = getelementptr inbounds float, float* %1, i64 11
+ %17 = load float, float* %16, align 4, !tbaa !2
+ %18 = fdiv float %15, %17
+ %19 = getelementptr inbounds float, float* %0, i64 3
+ store float %18, float* %13, align 4, !tbaa !2
+ %20 = getelementptr inbounds float, float* %1, i64 14
+ %21 = load float, float* %20, align 4, !tbaa !2
+ %22 = getelementptr inbounds float, float* %1, i64 44
+ %23 = load float, float* %22, align 4, !tbaa !2
+ %24 = fdiv float %21, %23
+ %25 = getelementptr inbounds float, float* %0, i64 4
+ store float %24, float* %19, align 4, !tbaa !2
+ %26 = getelementptr inbounds float, float* %1, i64 17
+ %27 = load float, float* %26, align 4, !tbaa !2
+ %28 = getelementptr inbounds float, float* %1, i64 33
+ %29 = load float, float* %28, align 4, !tbaa !2
+ %30 = fdiv float %27, %29
+ %31 = getelementptr inbounds float, float* %0, i64 5
+ store float %30, float* %25, align 4, !tbaa !2
+ %32 = getelementptr inbounds float, float* %1, i64 8
+ %33 = load float, float* %32, align 4, !tbaa !2
+ %34 = getelementptr inbounds float, float* %1, i64 30
+ %35 = load float, float* %34, align 4, !tbaa !2
+ %36 = fdiv float %33, %35
+ %37 = getelementptr inbounds float, float* %0, i64 6
+ store float %36, float* %31, align 4, !tbaa !2
+ %38 = getelementptr inbounds float, float* %1, i64 5
+ %39 = load float, float* %38, align 4, !tbaa !2
+ %40 = getelementptr inbounds float, float* %1, i64 27
+ %41 = load float, float* %40, align 4, !tbaa !2
+ %42 = fdiv float %39, %41
+ %43 = getelementptr inbounds float, float* %0, i64 7
+ store float %42, float* %37, align 4, !tbaa !2
+ %44 = getelementptr inbounds float, float* %1, i64 20
+ %45 = load float, float* %44, align 4, !tbaa !2
+ %46 = getelementptr inbounds float, float* %1, i64 23
+ %47 = load float, float* %46, align 4, !tbaa !2
+ %48 = fdiv float %45, %47
+ store float %48, float* %43, align 4, !tbaa !2
+ ret void
+}
+
+!2 = !{!3, !3, i64 0}
+!3 = !{!"short", !4, i64 0}
+!4 = !{!"omnipotent char", !5, i64 0}
+!5 = !{!"Simple C++ TBAA"}
More information about the llvm-commits
mailing list