[llvm] r326303 - [SLP] Added new tests and updated existing ones for jumbled loads, NFC.
Author: ashahid
Date: Tue Feb 27 20:19:34 2018
New Revision: 326303
URL: http://llvm.org/viewvc/llvm-project?rev=326303&view=rev
Log:
[SLP] Added new tests and updated existing ones for jumbled loads, NFC.
Added:
llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user_jumbled_load.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-used-in-phi.ll
Modified:
llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load.ll
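
For context: a "jumbled" load group is a set of scalar loads from
consecutive addresses whose users consume the values out of memory order.
The direction these tests prepare for is vectorizing such a group as one
wide load plus a shuffle that restores the order the users expect. A
minimal sketch of that shape (hypothetical IR, not taken from any test
below; %ptr and the mask are illustrative):

  %wide    = load <4 x i32>, <4 x i32>* %ptr, align 4
  %reorder = shufflevector <4 x i32> %wide, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 2>

The autogenerated CHECK lines below capture the behavior at this revision,
where out-of-order groups are still gathered via insertelement chains and
partial <2 x i32> loads.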
Added: llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user_jumbled_load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user_jumbled_load.ll?rev=326303&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user_jumbled_load.ll (added)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user_jumbled_load.ll Tue Feb 27 20:19:34 2018
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+@array = external global [20 x [13 x i32]]
+
+define void @hoge(i64 %idx, <4 x i32>* %sink) {
+; CHECK-LABEL: @hoge(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 [[IDX:%.*]], i64 5
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 [[IDX]], i64 6
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 [[IDX]], i64 7
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 [[IDX]], i64 8
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP1]] to <2 x i32>*
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, <2 x i32>* [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[TMP5]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP10]], i32 2
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP12]], i32 3
+; CHECK-NEXT: store <4 x i32> [[TMP13]], <4 x i32>* [[SINK:%.*]]
+; CHECK-NEXT: ret void
+;
+bb:
+ %0 = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 %idx, i64 5
+ %1 = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 %idx, i64 6
+ %2 = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 %idx, i64 7
+ %3 = getelementptr inbounds [20 x [13 x i32]], [20 x [13 x i32]]* @array, i64 0, i64 %idx, i64 8
+ %4 = load i32, i32* %1, align 4
+ %5 = insertelement <4 x i32> undef, i32 %4, i32 0
+ %6 = load i32, i32* %2, align 4
+ %7 = insertelement <4 x i32> %5, i32 %6, i32 1
+ %8 = load i32, i32* %3, align 4
+ %9 = insertelement <4 x i32> %7, i32 %8, i32 2
+ %10 = load i32, i32* %0, align 4
+ %11 = insertelement <4 x i32> %9, i32 %10, i32 3
+ store <4 x i32> %11, <4 x i32>* %sink
+ ret void
+}
+
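Note on @hoge above: the stored <4 x i32> is built in lane order
{A[idx][6], A[idx][7], A[idx][8], A[idx][5]} while the loads are
consecutive in memory at indices 5..8, and the only user of the built
vector is the store to %sink -- presumably the "external user" of the
file name. A comment-form summary of the lane mapping, derived from the
scalar IR above (not part of the test itself):

  ; lane 0 <- A[idx][6]  (%4, via %1)
  ; lane 1 <- A[idx][7]  (%6, via %2)
  ; lane 2 <- A[idx][8]  (%8, via %3)
  ; lane 3 <- A[idx][5]  (%10, via %0)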
Added: llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll?rev=326303&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll (added)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-shuffle-placement.ll Tue Feb 27 20:19:34 2018
@@ -0,0 +1,139 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+
+;void jumble (int * restrict A, int * restrict B) {
+;  int tmp0 = A[10]*A[0];
+;  int tmp1 = A[11]*A[1];
+;  int tmp2 = A[12]*A[3];
+;  int tmp3 = A[13]*A[2];
+;  B[0] = tmp0;
+;  B[1] = tmp1;
+;  B[2] = tmp2;
+;  B[3] = tmp3;
+;}
+
+
+; Function Attrs: norecurse nounwind uwtable
+define void @jumble1(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
+; CHECK-LABEL: @jumble1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 10
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 11
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to <2 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 12
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 13
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP2]], i32 2
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP5]], i32 3
+; CHECK-NEXT: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP4]], [[TMP11]]
+; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* [[TMP13]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx, align 4
+ %1 = load i32, i32* %A, align 4
+ %mul = mul nsw i32 %0, %1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 11
+ %2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
+ %3 = load i32, i32* %arrayidx3, align 4
+ %mul4 = mul nsw i32 %2, %3
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 12
+ %4 = load i32, i32* %arrayidx5, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 3
+ %5 = load i32, i32* %arrayidx6, align 4
+ %mul7 = mul nsw i32 %4, %5
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 13
+ %6 = load i32, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 2
+ %7 = load i32, i32* %arrayidx9, align 4
+ %mul10 = mul nsw i32 %6, %7
+ store i32 %mul, i32* %B, align 4
+ %arrayidx12 = getelementptr inbounds i32, i32* %B, i64 1
+ store i32 %mul4, i32* %arrayidx12, align 4
+ %arrayidx13 = getelementptr inbounds i32, i32* %B, i64 2
+ store i32 %mul7, i32* %arrayidx13, align 4
+ %arrayidx14 = getelementptr inbounds i32, i32* %B, i64 3
+ store i32 %mul10, i32* %arrayidx14, align 4
+ ret void
+}
+
+; Reversing the operands of the MUL.
+; Function Attrs: norecurse nounwind uwtable
+define void @jumble2(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
+; CHECK-LABEL: @jumble2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 10
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 11
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to <2 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 12
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 13
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP2]], i32 2
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP5]], i32 3
+; CHECK-NEXT: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP11]], [[TMP4]]
+; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* [[TMP13]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
+ %0 = load i32, i32* %arrayidx, align 4
+ %1 = load i32, i32* %A, align 4
+ %mul = mul nsw i32 %1, %0
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 11
+ %2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
+ %3 = load i32, i32* %arrayidx3, align 4
+ %mul4 = mul nsw i32 %3, %2
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 12
+ %4 = load i32, i32* %arrayidx5, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 3
+ %5 = load i32, i32* %arrayidx6, align 4
+ %mul7 = mul nsw i32 %5, %4
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 13
+ %6 = load i32, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 2
+ %7 = load i32, i32* %arrayidx9, align 4
+ %mul10 = mul nsw i32 %7, %6
+ store i32 %mul, i32* %B, align 4
+ %arrayidx12 = getelementptr inbounds i32, i32* %B, i64 1
+ store i32 %mul4, i32* %arrayidx12, align 4
+ %arrayidx13 = getelementptr inbounds i32, i32* %B, i64 2
+ store i32 %mul7, i32* %arrayidx13, align 4
+ %arrayidx14 = getelementptr inbounds i32, i32* %B, i64 3
+ store i32 %mul10, i32* %arrayidx14, align 4
+ ret void
+}
+
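The two functions above are identical except for the operand order of the
scalar multiplies, which is the point of the file: the gather for the
jumbled operand has to be placed correctly whichever side of the mul it
lands on. The decisive CHECK lines, copied side by side from above:

  ; jumble1: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP4]], [[TMP11]]
  ; jumble2: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP11]], [[TMP4]]

where [[TMP4]] is the consecutive <4 x i32> load of A[10..13] and
[[TMP11]] is the vector gathered from A[0], A[1], A[3], A[2].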
Added: llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-used-in-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-used-in-phi.ll?rev=326303&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-used-in-phi.ll (added)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load-used-in-phi.ll Tue Feb 27 20:19:34 2018
@@ -0,0 +1,232 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+;void phiUsingLoads(int *restrict A, int *restrict B) {
+;  int tmp0, tmp1, tmp2, tmp3;
+;  for (int i = 0; i < 100; i++) {
+;    if (A[0] == 0) {
+;      tmp0 = A[i + 0];
+;      tmp1 = A[i + 1];
+;      tmp2 = A[i + 2];
+;      tmp3 = A[i + 3];
+;    } else if (A[25] == 0) {
+;      tmp0 = A[i + 0];
+;      tmp1 = A[i + 1];
+;      tmp2 = A[i + 2];
+;      tmp3 = A[i + 3];
+;    } else if (A[50] == 0) {
+;      tmp0 = A[i + 0];
+;      tmp1 = A[i + 1];
+;      tmp2 = A[i + 2];
+;      tmp3 = A[i + 3];
+;    } else if (A[75] == 0) {
+;      tmp0 = A[i + 0];
+;      tmp1 = A[i + 1];
+;      tmp2 = A[i + 3];
+;      tmp3 = A[i + 2];
+;    }
+;  }
+;  B[0] = tmp0;
+;  B[1] = tmp1;
+;  B[2] = tmp2;
+;  B[3] = tmp3;
+;}
+
+
+; Function Attrs: norecurse nounwind uwtable
+define void @phiUsingLoads(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) local_unnamed_addr #0 {
+; CHECK-LABEL: @phiUsingLoads(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A:%.*]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 25
+; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 50
+; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 75
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: [[ARRAYIDX64:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
+; CHECK-NEXT: [[ARRAYIDX66:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[B]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP34:%.*]], <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT: ret void
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ undef, [[ENTRY]] ], [ [[TMP34]], [[FOR_INC]] ]
+; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[ARRAYIDX2]] to <4 x i32>*
+; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: if.else:
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX12]], align 4
+; CHECK-NEXT: [[CMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; CHECK-NEXT: br i1 [[CMP13]], label [[IF_THEN14:%.*]], label [[IF_ELSE27:%.*]]
+; CHECK: if.then14:
+; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP11:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = bitcast i32* [[ARRAYIDX17]] to <4 x i32>*
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, <4 x i32>* [[TMP12]], align 4
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: if.else27:
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX28]], align 4
+; CHECK-NEXT: [[CMP29:%.*]] = icmp eq i32 [[TMP14]], 0
+; CHECK-NEXT: br i1 [[CMP29]], label [[IF_THEN30:%.*]], label [[IF_ELSE43:%.*]]
+; CHECK: if.then30:
+; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP16:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = bitcast i32* [[ARRAYIDX33]] to <4 x i32>*
+; CHECK-NEXT: [[TMP19:%.*]] = load <4 x i32>, <4 x i32>* [[TMP18]], align 4
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: if.else43:
+; CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[ARRAYIDX44]], align 4
+; CHECK-NEXT: [[CMP45:%.*]] = icmp eq i32 [[TMP20]], 0
+; CHECK-NEXT: br i1 [[CMP45]], label [[IF_THEN46:%.*]], label [[FOR_INC]]
+; CHECK: if.then46:
+; CHECK-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP21:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP22:%.*]] = bitcast i32* [[ARRAYIDX49]] to <2 x i32>*
+; CHECK-NEXT: [[TMP23:%.*]] = load <2 x i32>, <2 x i32>* [[TMP22]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT: [[ARRAYIDX55:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX55]], align 4
+; CHECK-NEXT: [[TMP26:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
+; CHECK-NEXT: [[ARRAYIDX58:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP26]]
+; CHECK-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX58]], align 4
+; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i32> [[TMP23]], i32 0
+; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i32> undef, i32 [[TMP28]], i32 0
+; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP23]], i32 1
+; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> [[TMP29]], i32 [[TMP30]], i32 1
+; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> [[TMP31]], i32 [[TMP25]], i32 2
+; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP27]], i32 3
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[TMP34]] = phi <4 x i32> [ [[TMP7]], [[IF_THEN]] ], [ [[TMP13]], [[IF_THEN14]] ], [ [[TMP19]], [[IF_THEN30]] ], [ [[TMP33]], [[IF_THEN46]] ], [ [[TMP2]], [[IF_ELSE43]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+;
+entry:
+ %0 = load i32, i32* %A, align 4
+ %cmp1 = icmp eq i32 %0, 0
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 25
+ %arrayidx28 = getelementptr inbounds i32, i32* %A, i64 50
+ %arrayidx44 = getelementptr inbounds i32, i32* %A, i64 75
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.inc
+ store i32 %tmp0.1, i32* %B, align 4
+ %arrayidx64 = getelementptr inbounds i32, i32* %B, i64 1
+ store i32 %tmp1.1, i32* %arrayidx64, align 4
+ %arrayidx65 = getelementptr inbounds i32, i32* %B, i64 2
+ store i32 %tmp2.1, i32* %arrayidx65, align 4
+ %arrayidx66 = getelementptr inbounds i32, i32* %B, i64 3
+ store i32 %tmp3.1, i32* %arrayidx66, align 4
+ ret void
+
+for.body: ; preds = %for.inc, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+ %tmp3.0111 = phi i32 [ undef, %entry ], [ %tmp3.1, %for.inc ]
+ %tmp2.0110 = phi i32 [ undef, %entry ], [ %tmp2.1, %for.inc ]
+ %tmp1.0109 = phi i32 [ undef, %entry ], [ %tmp1.1, %for.inc ]
+ %tmp0.0108 = phi i32 [ undef, %entry ], [ %tmp0.1, %for.inc ]
+ br i1 %cmp1, label %if.then, label %if.else
+
+if.then: ; preds = %for.body
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx2, align 4
+ %2 = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %2
+ %3 = load i32, i32* %arrayidx5, align 4
+ %4 = add nuw nsw i64 %indvars.iv, 2
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %4
+ %5 = load i32, i32* %arrayidx8, align 4
+ %6 = add nuw nsw i64 %indvars.iv, 3
+ %arrayidx11 = getelementptr inbounds i32, i32* %A, i64 %6
+ %7 = load i32, i32* %arrayidx11, align 4
+ br label %for.inc
+
+if.else: ; preds = %for.body
+ %8 = load i32, i32* %arrayidx12, align 4
+ %cmp13 = icmp eq i32 %8, 0
+ br i1 %cmp13, label %if.then14, label %if.else27
+
+if.then14: ; preds = %if.else
+ %arrayidx17 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %9 = load i32, i32* %arrayidx17, align 4
+ %10 = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx20 = getelementptr inbounds i32, i32* %A, i64 %10
+ %11 = load i32, i32* %arrayidx20, align 4
+ %12 = add nuw nsw i64 %indvars.iv, 2
+ %arrayidx23 = getelementptr inbounds i32, i32* %A, i64 %12
+ %13 = load i32, i32* %arrayidx23, align 4
+ %14 = add nuw nsw i64 %indvars.iv, 3
+ %arrayidx26 = getelementptr inbounds i32, i32* %A, i64 %14
+ %15 = load i32, i32* %arrayidx26, align 4
+ br label %for.inc
+
+if.else27: ; preds = %if.else
+ %16 = load i32, i32* %arrayidx28, align 4
+ %cmp29 = icmp eq i32 %16, 0
+ br i1 %cmp29, label %if.then30, label %if.else43
+
+if.then30: ; preds = %if.else27
+ %arrayidx33 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %17 = load i32, i32* %arrayidx33, align 4
+ %18 = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx36 = getelementptr inbounds i32, i32* %A, i64 %18
+ %19 = load i32, i32* %arrayidx36, align 4
+ %20 = add nuw nsw i64 %indvars.iv, 2
+ %arrayidx39 = getelementptr inbounds i32, i32* %A, i64 %20
+ %21 = load i32, i32* %arrayidx39, align 4
+ %22 = add nuw nsw i64 %indvars.iv, 3
+ %arrayidx42 = getelementptr inbounds i32, i32* %A, i64 %22
+ %23 = load i32, i32* %arrayidx42, align 4
+ br label %for.inc
+
+if.else43: ; preds = %if.else27
+ %24 = load i32, i32* %arrayidx44, align 4
+ %cmp45 = icmp eq i32 %24, 0
+ br i1 %cmp45, label %if.then46, label %for.inc
+
+if.then46: ; preds = %if.else43
+ %arrayidx49 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %25 = load i32, i32* %arrayidx49, align 4
+ %26 = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx52 = getelementptr inbounds i32, i32* %A, i64 %26
+ %27 = load i32, i32* %arrayidx52, align 4
+ %28 = add nuw nsw i64 %indvars.iv, 3
+ %arrayidx55 = getelementptr inbounds i32, i32* %A, i64 %28
+ %29 = load i32, i32* %arrayidx55, align 4
+ %30 = add nuw nsw i64 %indvars.iv, 2
+ %arrayidx58 = getelementptr inbounds i32, i32* %A, i64 %30
+ %31 = load i32, i32* %arrayidx58, align 4
+ br label %for.inc
+
+for.inc: ; preds = %if.then, %if.then30, %if.else43, %if.then46, %if.then14
+ %tmp0.1 = phi i32 [ %1, %if.then ], [ %9, %if.then14 ], [ %17, %if.then30 ], [ %25, %if.then46 ], [ %tmp0.0108, %if.else43 ]
+ %tmp1.1 = phi i32 [ %3, %if.then ], [ %11, %if.then14 ], [ %19, %if.then30 ], [ %27, %if.then46 ], [ %tmp1.0109, %if.else43 ]
+ %tmp2.1 = phi i32 [ %5, %if.then ], [ %13, %if.then14 ], [ %21, %if.then30 ], [ %29, %if.then46 ], [ %tmp2.0110, %if.else43 ]
+ %tmp3.1 = phi i32 [ %7, %if.then ], [ %15, %if.then14 ], [ %23, %if.then30 ], [ %31, %if.then46 ], [ %tmp3.0111, %if.else43 ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 100
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
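Note on @phiUsingLoads above: the first three guarded blocks load
A[i..i+3] in memory order, and the CHECK lines show each becoming a plain
<4 x i32> load; only the fourth block (guarded by A[75]) swaps tmp2 and
tmp3 and is therefore gathered -- a <2 x i32> load of A[i..i+1] plus
scalar loads of A[i+3] and A[i+2]. All four vector values then merge in
the phi that the final stores consume, per this CHECK line from above:

  ; [[TMP34]] = phi <4 x i32> [ [[TMP7]], [[IF_THEN]] ], [ [[TMP13]], [[IF_THEN14]] ], [ [[TMP19]], [[IF_THEN30]] ], [ [[TMP33]], [[IF_THEN46]] ], [ [[TMP2]], [[IF_ELSE43]] ]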
Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load.ll?rev=326303&r1=326302&r2=326303&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/jumbled-load.ll Tue Feb 27 20:19:34 2018
@@ -5,7 +5,7 @@
define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
; CHECK-LABEL: @jumbled-load(
-; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* %in, i64 0
+; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
; CHECK-NEXT: [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
@@ -13,7 +13,7 @@ define i32 @jumbled-load(i32* noalias no
; CHECK-NEXT: [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
; CHECK-NEXT: [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
-; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* %inn, i64 0
+; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
; CHECK-NEXT: [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
@@ -25,13 +25,13 @@ define i32 @jumbled-load(i32* noalias no
; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_8]]
; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 [[LOAD_4]], [[LOAD_7]]
; CHECK-NEXT: [[MUL_4:%.*]] = mul i32 [[LOAD_1]], [[LOAD_6]]
-; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* %out, i64 0
+; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
; CHECK-NEXT: store i32 [[MUL_1]], i32* [[GEP_7]], align 4
-; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* %out, i64 1
+; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
; CHECK-NEXT: store i32 [[MUL_2]], i32* [[GEP_8]], align 4
-; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* %out, i64 2
+; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
; CHECK-NEXT: store i32 [[MUL_3]], i32* [[GEP_9]], align 4
-; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* %out, i64 3
+; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
; CHECK-NEXT: store i32 [[MUL_4]], i32* [[GEP_10]], align 4
; CHECK-NEXT: ret i32 undef
;
@@ -58,6 +58,55 @@ define i32 @jumbled-load(i32* noalias no
%gep.7 = getelementptr inbounds i32, i32* %out, i64 0
store i32 %mul.1, i32* %gep.7, align 4
%gep.8 = getelementptr inbounds i32, i32* %out, i64 1
+ store i32 %mul.2, i32* %gep.8, align 4
+ %gep.9 = getelementptr inbounds i32, i32* %out, i64 2
+ store i32 %mul.3, i32* %gep.9, align 4
+ %gep.10 = getelementptr inbounds i32, i32* %out, i64 3
+ store i32 %mul.4, i32* %gep.10, align 4
+
+ ret i32 undef
+}
+
+
+define i32 @jumbled-load-multiuses(i32* noalias nocapture %in, i32* noalias nocapture %out) {
+; CHECK-LABEL: @jumbled-load-multiuses(
+; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
+; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
+; CHECK-NEXT: [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
+; CHECK-NEXT: [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
+; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
+; CHECK-NEXT: [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i32 [[LOAD_3]], [[LOAD_4]]
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_2]]
+; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 [[LOAD_4]], [[LOAD_1]]
+; CHECK-NEXT: [[MUL_4:%.*]] = mul i32 [[LOAD_1]], [[LOAD_3]]
+; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
+; CHECK-NEXT: store i32 [[MUL_1]], i32* [[GEP_7]], align 4
+; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
+; CHECK-NEXT: store i32 [[MUL_2]], i32* [[GEP_8]], align 4
+; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
+; CHECK-NEXT: store i32 [[MUL_3]], i32* [[GEP_9]], align 4
+; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
+; CHECK-NEXT: store i32 [[MUL_4]], i32* [[GEP_10]], align 4
+; CHECK-NEXT: ret i32 undef
+;
+ %in.addr = getelementptr inbounds i32, i32* %in, i64 0
+ %load.1 = load i32, i32* %in.addr, align 4
+ %gep.1 = getelementptr inbounds i32, i32* %in.addr, i64 3
+ %load.2 = load i32, i32* %gep.1, align 4
+ %gep.2 = getelementptr inbounds i32, i32* %in.addr, i64 1
+ %load.3 = load i32, i32* %gep.2, align 4
+ %gep.3 = getelementptr inbounds i32, i32* %in.addr, i64 2
+ %load.4 = load i32, i32* %gep.3, align 4
+ %mul.1 = mul i32 %load.3, %load.4
+ %mul.2 = mul i32 %load.2, %load.2
+ %mul.3 = mul i32 %load.4, %load.1
+ %mul.4 = mul i32 %load.1, %load.3
+ %gep.7 = getelementptr inbounds i32, i32* %out, i64 0
+ store i32 %mul.1, i32* %gep.7, align 4
+ %gep.8 = getelementptr inbounds i32, i32* %out, i64 1
store i32 %mul.2, i32* %gep.8, align 4
%gep.9 = getelementptr inbounds i32, i32* %out, i64 2
store i32 %mul.3, i32* %gep.9, align 4
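
The new @jumbled-load-multiuses function differs from @jumbled-load in
that the loaded values are reused across lanes (one even multiplies
itself):

  %mul.1 = mul i32 %load.3, %load.4   ; %load.3 also feeds %mul.4
  %mul.2 = mul i32 %load.2, %load.2   ; same load in both operands
  %mul.3 = mul i32 %load.4, %load.1   ; %load.4 and %load.1 reused as well

Its CHECK lines record that the function is left in scalar form at this
revision, i.e. SLP does not vectorize a jumbled group whose loads have
multiple uses.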