[llvm] a7648eb - [LV] Use patterns in some induction tests, to make more robust. (NFC)

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 24 05:32:37 PST 2021


Author: Florian Hahn
Date: 2021-11-24T13:32:24Z
New Revision: a7648eb2aaf848e903dca46bb9efb75809570ef1

URL: https://github.com/llvm/llvm-project/commit/a7648eb2aaf848e903dca46bb9efb75809570ef1
DIFF: https://github.com/llvm/llvm-project/commit/a7648eb2aaf848e903dca46bb9efb75809570ef1.diff

LOG: [LV] Use patterns in some induction tests, to make more robust. (NFC)

Added: 
    

Modified: 
    llvm/test/Transforms/LoopVectorize/induction.ll
    llvm/test/Transforms/LoopVectorize/induction_plus.ll
    llvm/test/Transforms/LoopVectorize/vplan-printing.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 1a4167f5c3b9b..cdb1dc04a604f 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -400,30 +400,30 @@ for.end:
 ; CHECK-LABEL: @iv_vector_and_scalar_users(
 ; CHECK: vector.body:
 ; CHECK:   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECK:   %vec.ind = phi <2 x i64> [ <i64 0, i64 1>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-; CHECK:   %vec.ind1 = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %vec.ind.next2, %vector.body ]
+; CHECK:   [[VEC_IV_1:%.+]] = phi <2 x i64> [ <i64 0, i64 1>, %vector.ph ], [ [[VEC_IV_1_NEXT:%.+]], %vector.body ]
+; CHECK:   [[VEC_IV_2:%.+]] = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ [[VEC_IV_2_NEXT:%.+]], %vector.body ]
 ; CHECK:   %[[i0:.+]] = add i64 %index, 0
 ; CHECK:   %[[i1:.+]] = add i64 %index, 1
 ; CHECK:   getelementptr inbounds %pair.i16, %pair.i16* %p, i64 %[[i0]], i32 1
 ; CHECK:   getelementptr inbounds %pair.i16, %pair.i16* %p, i64 %[[i1]], i32 1
 ; CHECK:   %index.next = add nuw i64 %index, 2
-; CHECK:   %vec.ind.next = add <2 x i64> %vec.ind, <i64 2, i64 2>
-; CHECK:   %vec.ind.next2 = add <2 x i32> %vec.ind1, <i32 2, i32 2>
+; CHECK:   [[VEC_IV_1_NEXT]] = add <2 x i64> [[VEC_IV_1]], <i64 2, i64 2>
+; CHECK:   [[VEC_IV_2_NEXT]] = add <2 x i32> [[VEC_IV_2]], <i32 2, i32 2>
 ;
 ; IND-LABEL: @iv_vector_and_scalar_users(
 ; IND: vector.body:
 ; IND:   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; IND:   %vec.ind1 = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %vec.ind.next2, %vector.body ]
+; IND:   [[VEC_IV:%.+]] = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ [[VEC_IV_NEXT:%.+]], %vector.body ]
 ; IND:   %[[i1:.+]] = or i64 %index, 1
 ; IND:   getelementptr inbounds %pair.i16, %pair.i16* %p, i64 %index, i32 1
 ; IND:   getelementptr inbounds %pair.i16, %pair.i16* %p, i64 %[[i1]], i32 1
 ; IND:   %index.next = add nuw i64 %index, 2
-; IND:   %vec.ind.next2 = add <2 x i32> %vec.ind1, <i32 2, i32 2>
+; IND:   [[VEC_IV_NEXT]] = add <2 x i32> [[VEC_IV]], <i32 2, i32 2>
 ;
 ; UNROLL-LABEL: @iv_vector_and_scalar_users(
 ; UNROLL: vector.body:
 ; UNROLL:   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; UNROLL:   %vec.ind2 = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %vec.ind.next5, %vector.body ]
+; UNROLL:   [[VEC_IV:%.+]] = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ [[VEC_IV_NEXT:%.+]], %vector.body ]
 ; UNROLL:   %[[i1:.+]] = or i64 %index, 1
 ; UNROLL:   %[[i2:.+]] = or i64 %index, 2
 ; UNROLL:   %[[i3:.+]] = or i64 %index, 3
@@ -433,7 +433,7 @@ for.end:
 ; UNROLL:   getelementptr inbounds %pair.i16, %pair.i16* %p, i64 %[[i2]], i32 1
 ; UNROLL:   getelementptr inbounds %pair.i16, %pair.i16* %p, i64 %[[i3]], i32 1
 ; UNROLL:   %index.next = add nuw i64 %index, 4
-; UNROLL:   %vec.ind.next5 = add <2 x i32> %vec.ind2, <i32 4, i32 4>
+; UNROLL:   [[VEC_IV_NEXT]] = add <2 x i32> [[VEC_IV]], <i32 4, i32 4>
 
 %pair.i16 = type { i16, i16 }
 define void @iv_vector_and_scalar_users(%pair.i16* %p, i32 %a, i32 %n) {
@@ -860,12 +860,12 @@ define i64 @trunc_with_first_order_recurrence() {
 ; CHECK-LABEL: vector.body:
 ; CHECK-NEXT:    %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK-NEXT:    %vec.phi = phi <2 x i64>
-; CHECK-NEXT:    %vec.ind = phi <2 x i64> [ <i64 1, i64 2>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-; CHECK-NEXT:    %vec.ind2 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next3, %vector.body ]
-; CHECK-NEXT:    %vector.recur = phi <2 x i32> [ <i32 poison, i32 42>, %vector.ph ], [ %vec.ind4, %vector.body ]
-; CHECK-NEXT:    %vec.ind4 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next5, %vector.body ]
-; CHECK-NEXT:    %vec.ind6 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next7, %vector.body ]
-; CHECK-NEXT:    shufflevector <2 x i32> %vector.recur, <2 x i32> %vec.ind4, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT:    [[VEC_IV_1:%.+]] = phi <2 x i64> [ <i64 1, i64 2>, %vector.ph ], [ [[VEC_IV_1_NEXT:%.+]], %vector.body ]
+; CHECK-NEXT:    [[VEC_IV_2:%.+]] = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ [[VEC_IV_2_NEXT:%.+]], %vector.body ]
+; CHECK-NEXT:    [[VEC_RECUR:%.+]] = phi <2 x i32> [ <i32 poison, i32 42>, %vector.ph ], [ [[VEC_IV_3:%.+]], %vector.body ]
+; CHECK-NEXT:    [[VEC_IV_3]] = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ [[VEC_IV_3_NEXT:%.+]], %vector.body ]
+; CHECK-NEXT:    [[VEC_IV_4:%.+]] = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ [[VEC_IV_4_NEXT:%.+]], %vector.body ]
+; CHECK-NEXT:    shufflevector <2 x i32> [[VEC_RECUR]], <2 x i32> [[VEC_IV_3]], <2 x i32> <i32 1, i32 2>
 entry:
   br label %loop
 

diff --git a/llvm/test/Transforms/LoopVectorize/induction_plus.ll b/llvm/test/Transforms/LoopVectorize/induction_plus.ll
index b09b278cb4b46..5b56446bf571f 100644
--- a/llvm/test/Transforms/LoopVectorize/induction_plus.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction_plus.ll
@@ -6,13 +6,13 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 
 ;CHECK-LABEL: @array_at_plus_one(
 ;CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-;CHECK: %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-;CHECK: %vec.ind1 = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next2, %vector.body ]
+;CHECK: [[VEC_IV_1:%.+]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ [[VEC_IV_1_NEXT:%.+]], %vector.body ]
+;CHECK: [[VEC_IV_2:%.+]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ [[VEC_IV_2_NEXT:%.+]], %vector.body ]
 ;CHECK: %[[T1:.+]] = add i64 %index, 0
 ;CHECK: %[[T2:.+]] = add nsw i64 %[[T1]], 12
 ;CHECK: getelementptr inbounds [1024 x i32], [1024 x i32]* @array, i64 0, i64 %[[T2]]
-;CHECK: %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
-;CHECK: %vec.ind.next2 = add <4 x i32> %vec.ind1, <i32 4, i32 4, i32 4, i32 4>
+;CHECK: [[VEC_IV_1_NEXT]] = add <4 x i64> [[VEC_IV_1]], <i64 4, i64 4, i64 4, i64 4>
+;CHECK: [[VEC_IV_2_NEXT]] = add <4 x i32> [[VEC_IV_2]], <i32 4, i32 4, i32 4, i32 4>
 ;CHECK: ret i32
 define i32 @array_at_plus_one(i32 %n) nounwind uwtable ssp {
   %1 = icmp sgt i32 %n, 0

diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
index fbadb8ce4c7f2..a5fb747163211 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
@@ -250,11 +250,11 @@ define float @print_fmuladd_strict(float* %a, float* %b, i64 %n) {
 ; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi 0, %iv.next
 ; CHECK-NEXT:   WIDEN-REDUCTION-PHI ir<%sum.07> = phi ir<0.000000e+00>, ir<%muladd>
 ; CHECK-NEXT:   CLONE ir<%arrayidx> = getelementptr ir<%a>, ir<%iv>
-; CHECK-NEXT:   WIDEN ir<%0> = load ir<%arrayidx>
+; CHECK-NEXT:   WIDEN ir<%l.a> = load ir<%arrayidx>
 ; CHECK-NEXT:   CLONE ir<%arrayidx2> = getelementptr ir<%b>, ir<%iv>
-; CHECK-NEXT:   WIDEN ir<%1> = load ir<%arrayidx2>
-; CHECK-NEXT:   EMIT vp<%6> = fmul nnan ninf nsz ir<%0> ir<%1>
-; CHECK-NEXT:   REDUCE ir<%muladd> = ir<%sum.07> + nnan ninf nsz reduce.fadd (vp<%6>)
+; CHECK-NEXT:   WIDEN ir<%l.b> = load ir<%arrayidx2>
+; CHECK-NEXT:   EMIT vp<[[FMUL:%.]]> = fmul nnan ninf nsz ir<%l.a> ir<%l.b>
+; CHECK-NEXT:   REDUCE ir<[[MULADD:%.+]]> = ir<%sum.07> + nnan ninf nsz reduce.fadd (vp<[[FMUL]]>)
 ; CHECK-NEXT:   No successors
 ; CHECK-NEXT: }
 
@@ -265,10 +265,10 @@ for.body:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
   %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
   %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
-  %0 = load float, float* %arrayidx, align 4
+  %l.a = load float, float* %arrayidx, align 4
   %arrayidx2 = getelementptr inbounds float, float* %b, i64 %iv
-  %1 = load float, float* %arrayidx2, align 4
-  %muladd = tail call nnan ninf nsz float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
+  %l.b = load float, float* %arrayidx2, align 4
+  %muladd = tail call nnan ninf nsz float @llvm.fmuladd.f32(float %l.a, float %l.b, float %sum.07)
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
   br i1 %exitcond.not, label %for.end, label %for.body


        


More information about the llvm-commits mailing list