[llvm] f5ed863 - Revert "[VPlan] Allow derived IVs and scalar-steps in narrowing interleave."

Florian Hahn via llvm-commits <llvm-commits at lists.llvm.org>
Sun Jun 29 06:40:28 PDT 2025


Author: Florian Hahn
Date: 2025-06-29T14:40:03+01:00
New Revision: f5ed863176dd286462cd5558723dfe445967fedf

URL: https://github.com/llvm/llvm-project/commit/f5ed863176dd286462cd5558723dfe445967fedf
DIFF: https://github.com/llvm/llvm-project/commit/f5ed863176dd286462cd5558723dfe445967fedf.diff

LOG: Revert "[VPlan] Allow derived IVs and scalar-steps in narrowing interleave."

This reverts commit 2787759ef2e41b19f8bfde06fe9a26b25d1f5834.

This exposed a crash on some buildbots; reverting to investigate.
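For context, the reverted patch let VPlanTransforms::narrowInterleaveGroups step over derived-IV and scalar-IV-steps recipes whose results are only used for their first lane, instead of treating them as unsupported. This is the guard the revert removes (it appears verbatim in the hunk below; the comments here are annotations, not part of the source):

    // Tolerate derived IVs and scalar IV steps when only their first lane
    // is used; the narrowing transform can still apply. With this revert,
    // such recipes fall through to the bail-out checks that follow.
    if (isa<VPDerivedIVRecipe, VPScalarIVStepsRecipe>(&R) &&
        vputils::onlyFirstLaneUsed(cast<VPSingleDefRecipe>(&R)))
      continue;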

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
    llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 077f0b79f25a5..bcfb889469ea1 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -3195,10 +3195,6 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
         match(&R, m_BranchOnCount(m_VPValue(), m_VPValue())))
       continue;
 
-    if (isa<VPDerivedIVRecipe, VPScalarIVStepsRecipe>(&R) &&
-        vputils::onlyFirstLaneUsed(cast<VPSingleDefRecipe>(&R)))
-      continue;
-
     // Bail out on recipes not supported at the moment:
     //  * phi recipes other than the canonical induction
     //  * recipes writing to memory except interleave groups

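The test updates below show the user-visible effect. With the guard gone, loops whose addresses come from derived integer or pointer IVs are no longer narrowed: the VF2 runs go back to the interleave-group lowering (a wide load, de-interleave and re-interleave shuffles, and an induction step of 2) instead of plain consecutive <2 x double> accesses with a step of 1, and the VF2IC2 RUN line and its checks are dropped entirely. A hand-reduced sketch of the two VF2 loop-body shapes, assembled from the check lines that change (function names are illustrative, not from the test):

    ; Narrowed shape produced before this revert: one consecutive
    ; load/store pair per iteration.
    define void @narrowed_sketch(ptr %a, ptr %b) {
      %wide.load = load <2 x double>, ptr %a, align 8
      store <2 x double> %wide.load, ptr %b, align 8
      ret void
    }

    ; Interleave-group shape restored by this revert: wide load,
    ; de-interleaving shuffles, re-interleaving shuffles, wide store.
    define void @interleave_group_sketch(ptr %a, ptr %b) {
      %wide.vec = load <4 x double>, ptr %a, align 8
      %strided.vec0 = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 0, i32 2>
      %strided.vec1 = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 1, i32 3>
      %concat = shufflevector <2 x double> %strided.vec0, <2 x double> %strided.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
      %interleaved = shufflevector <4 x double> %concat, <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
      store <4 x double> %interleaved, ptr %b, align 8
      ret void
    }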
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll
index 3cde3f3422cf9..5efd821ba990f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-derived-ivs.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
 ; RUN: opt -p loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -S %s | FileCheck --check-prefixes=VF2 %s
-; RUN: opt -p loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S %s | FileCheck --check-prefixes=VF2IC2 %s
 ; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck --check-prefixes=VF4 %s
 
 target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32"
@@ -26,10 +25,14 @@ define void @derived_int_ivs(ptr noalias %a, ptr noalias %b, i64 %end) {
 ; VF2-NEXT:    [[TMP5:%.*]] = mul i64 [[INDEX]], 16
 ; VF2-NEXT:    [[OFFSET_IDX:%.*]] = add i64 16, [[TMP5]]
 ; VF2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[OFFSET_IDX]]
-; VF2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP6]], align 8
+; VF2-NEXT:    [[WIDE_VEC:%.*]] = load <4 x double>, ptr [[TMP6]], align 8
+; VF2-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 0, i32 2>
+; VF2-NEXT:    [[STRIDED_VEC1:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 1, i32 3>
 ; VF2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[OFFSET_IDX]]
-; VF2-NEXT:    store <2 x double> [[WIDE_LOAD]], ptr [[TMP7]], align 8
-; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
+; VF2-NEXT:    [[TMP8:%.*]] = shufflevector <2 x double> [[STRIDED_VEC]], <2 x double> [[STRIDED_VEC1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; VF2-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP8]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; VF2-NEXT:    store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8
+; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
 ; VF2-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; VF2-NEXT:    br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; VF2:       [[MIDDLE_BLOCK]]:
@@ -37,41 +40,6 @@ define void @derived_int_ivs(ptr noalias %a, ptr noalias %b, i64 %end) {
 ; VF2-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
 ; VF2:       [[SCALAR_PH]]:
 ;
-; VF2IC2-LABEL: define void @derived_int_ivs(
-; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[END:%.*]]) {
-; VF2IC2-NEXT:  [[ENTRY:.*:]]
-; VF2IC2-NEXT:    [[TMP0:%.*]] = add i64 [[END]], -32
-; VF2IC2-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 4
-; VF2IC2-NEXT:    [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; VF2IC2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
-; VF2IC2-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; VF2IC2:       [[VECTOR_PH]]:
-; VF2IC2-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
-; VF2IC2-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; VF2IC2-NEXT:    [[TMP3:%.*]] = mul i64 [[N_VEC]], 16
-; VF2IC2-NEXT:    [[TMP4:%.*]] = add i64 16, [[TMP3]]
-; VF2IC2-NEXT:    br label %[[VECTOR_BODY:.*]]
-; VF2IC2:       [[VECTOR_BODY]]:
-; VF2IC2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; VF2IC2-NEXT:    [[TMP5:%.*]] = mul i64 [[INDEX]], 16
-; VF2IC2-NEXT:    [[OFFSET_IDX:%.*]] = add i64 16, [[TMP5]]
-; VF2IC2-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 16
-; VF2IC2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[OFFSET_IDX]]
-; VF2IC2-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP6]]
-; VF2IC2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP7]], align 8
-; VF2IC2-NEXT:    [[WIDE_LOAD1:%.*]] = load <2 x double>, ptr [[TMP8]], align 8
-; VF2IC2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[OFFSET_IDX]]
-; VF2IC2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP6]]
-; VF2IC2-NEXT:    store <2 x double> [[WIDE_LOAD]], ptr [[TMP9]], align 8
-; VF2IC2-NEXT:    store <2 x double> [[WIDE_LOAD1]], ptr [[TMP10]], align 8
-; VF2IC2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; VF2IC2-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; VF2IC2-NEXT:    br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; VF2IC2:       [[MIDDLE_BLOCK]]:
-; VF2IC2-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
-; VF2IC2-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
-; VF2IC2:       [[SCALAR_PH]]:
-;
 ; VF4-LABEL: define void @derived_int_ivs(
 ; VF4-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[END:%.*]]) {
 ; VF4-NEXT:  [[ENTRY:.*:]]
@@ -167,9 +135,13 @@ define void @derived_pointer_ivs(ptr noalias %a, ptr noalias %b, ptr %end) {
 ; VF2-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
 ; VF2-NEXT:    [[OFFSET_IDX6:%.*]] = mul i64 [[INDEX]], 16
 ; VF2-NEXT:    [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX6]]
-; VF2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[NEXT_GEP]], align 8
-; VF2-NEXT:    store <2 x double> [[WIDE_LOAD]], ptr [[NEXT_GEP7]], align 8
-; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
+; VF2-NEXT:    [[WIDE_VEC:%.*]] = load <4 x double>, ptr [[NEXT_GEP]], align 8
+; VF2-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 0, i32 2>
+; VF2-NEXT:    [[STRIDED_VEC8:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 1, i32 3>
+; VF2-NEXT:    [[TMP13:%.*]] = shufflevector <2 x double> [[STRIDED_VEC]], <2 x double> [[STRIDED_VEC8]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; VF2-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP13]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; VF2-NEXT:    store <4 x double> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP7]], align 8
+; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
 ; VF2-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; VF2-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; VF2:       [[MIDDLE_BLOCK]]:
@@ -177,61 +149,6 @@ define void @derived_pointer_ivs(ptr noalias %a, ptr noalias %b, ptr %end) {
 ; VF2-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
 ; VF2:       [[SCALAR_PH]]:
 ;
-; VF2IC2-LABEL: define void @derived_pointer_ivs(
-; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr [[END:%.*]]) {
-; VF2IC2-NEXT:  [[ENTRY:.*:]]
-; VF2IC2-NEXT:    [[A5:%.*]] = ptrtoint ptr [[A]] to i64
-; VF2IC2-NEXT:    [[END4:%.*]] = ptrtoint ptr [[END]] to i64
-; VF2IC2-NEXT:    [[A2:%.*]] = ptrtoint ptr [[A]] to i64
-; VF2IC2-NEXT:    [[END1:%.*]] = ptrtoint ptr [[END]] to i64
-; VF2IC2-NEXT:    [[TMP0:%.*]] = add i64 [[END4]], -16
-; VF2IC2-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[A5]]
-; VF2IC2-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP1]], 4
-; VF2IC2-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
-; VF2IC2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4
-; VF2IC2-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
-; VF2IC2:       [[VECTOR_MEMCHECK]]:
-; VF2IC2-NEXT:    [[TMP4:%.*]] = add i64 [[END1]], -16
-; VF2IC2-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP4]], [[A2]]
-; VF2IC2-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 4
-; VF2IC2-NEXT:    [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 4
-; VF2IC2-NEXT:    [[TMP8:%.*]] = add i64 [[TMP7]], 16
-; VF2IC2-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP8]]
-; VF2IC2-NEXT:    [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP8]]
-; VF2IC2-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP3]]
-; VF2IC2-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
-; VF2IC2-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; VF2IC2-NEXT:    br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
-; VF2IC2:       [[VECTOR_PH]]:
-; VF2IC2-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4
-; VF2IC2-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; VF2IC2-NEXT:    [[TMP9:%.*]] = mul i64 [[N_VEC]], 16
-; VF2IC2-NEXT:    [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP9]]
-; VF2IC2-NEXT:    [[TMP11:%.*]] = mul i64 [[N_VEC]], 16
-; VF2IC2-NEXT:    [[TMP12:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP11]]
-; VF2IC2-NEXT:    br label %[[VECTOR_BODY:.*]]
-; VF2IC2:       [[VECTOR_BODY]]:
-; VF2IC2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; VF2IC2-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16
-; VF2IC2-NEXT:    [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 16
-; VF2IC2-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
-; VF2IC2-NEXT:    [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP13]]
-; VF2IC2-NEXT:    [[OFFSET_IDX7:%.*]] = mul i64 [[INDEX]], 16
-; VF2IC2-NEXT:    [[TMP14:%.*]] = add i64 [[OFFSET_IDX7]], 16
-; VF2IC2-NEXT:    [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX7]]
-; VF2IC2-NEXT:    [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP14]]
-; VF2IC2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[NEXT_GEP]], align 8
-; VF2IC2-NEXT:    [[WIDE_LOAD10:%.*]] = load <2 x double>, ptr [[NEXT_GEP6]], align 8
-; VF2IC2-NEXT:    store <2 x double> [[WIDE_LOAD]], ptr [[NEXT_GEP8]], align 8
-; VF2IC2-NEXT:    store <2 x double> [[WIDE_LOAD10]], ptr [[NEXT_GEP9]], align 8
-; VF2IC2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; VF2IC2-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; VF2IC2-NEXT:    br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; VF2IC2:       [[MIDDLE_BLOCK]]:
-; VF2IC2-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; VF2IC2-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
-; VF2IC2:       [[SCALAR_PH]]:
-;
 ; VF4-LABEL: define void @derived_pointer_ivs(
 ; VF4-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr [[END:%.*]]) {
 ; VF4-NEXT:  [[ENTRY:.*:]]
@@ -318,43 +235,21 @@ define void @narrow_with_uniform_add_and_gep(ptr noalias %p) {
 ; VF2-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
 ; VF2-NEXT:    [[TMP0:%.*]] = add nuw nsw i64 [[OFFSET_IDX]], 0
 ; VF2-NEXT:    [[TMP1:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP0]]
-; VF2-NEXT:    [[STRIDED_VEC1:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; VF2-NEXT:    [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
+; VF2-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
+; VF2-NEXT:    [[STRIDED_VEC1:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 1, i32 3>
+; VF2-NEXT:    [[TMP2:%.*]] = add <2 x i64> [[STRIDED_VEC]], splat (i64 1)
 ; VF2-NEXT:    [[TMP3:%.*]] = add <2 x i64> [[STRIDED_VEC1]], splat (i64 1)
-; VF2-NEXT:    store <2 x i64> [[TMP3]], ptr [[TMP1]], align 8
-; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
+; VF2-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; VF2-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; VF2-NEXT:    store <4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
+; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
 ; VF2-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
 ; VF2-NEXT:    br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
 ; VF2:       [[MIDDLE_BLOCK]]:
 ; VF2-NEXT:    br i1 true, [[EXIT:label %.*]], label %[[SCALAR_PH]]
 ; VF2:       [[SCALAR_PH]]:
 ;
-; VF2IC2-LABEL: define void @narrow_with_uniform_add_and_gep(
-; VF2IC2-SAME: ptr noalias [[P:%.*]]) {
-; VF2IC2-NEXT:  [[ENTRY:.*:]]
-; VF2IC2-NEXT:    br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; VF2IC2:       [[VECTOR_PH]]:
-; VF2IC2-NEXT:    br label %[[VECTOR_BODY:.*]]
-; VF2IC2:       [[VECTOR_BODY]]:
-; VF2IC2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; VF2IC2-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; VF2IC2-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 2
-; VF2IC2-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[OFFSET_IDX]], 0
-; VF2IC2-NEXT:    [[TMP2:%.*]] = add nuw nsw i64 [[TMP0]], 0
-; VF2IC2-NEXT:    [[TMP3:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]]
-; VF2IC2-NEXT:    [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]]
-; VF2IC2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8
-; VF2IC2-NEXT:    [[WIDE_LOAD1:%.*]] = load <2 x i64>, ptr [[TMP4]], align 8
-; VF2IC2-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[WIDE_LOAD]], splat (i64 1)
-; VF2IC2-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[WIDE_LOAD1]], splat (i64 1)
-; VF2IC2-NEXT:    store <2 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; VF2IC2-NEXT:    store <2 x i64> [[TMP6]], ptr [[TMP4]], align 8
-; VF2IC2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; VF2IC2-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
-; VF2IC2-NEXT:    br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; VF2IC2:       [[MIDDLE_BLOCK]]:
-; VF2IC2-NEXT:    br i1 true, [[EXIT:label %.*]], label %[[SCALAR_PH]]
-; VF2IC2:       [[SCALAR_PH]]:
-;
 ; VF4-LABEL: define void @narrow_with_uniform_add_and_gep(
 ; VF4-SAME: ptr noalias [[P:%.*]]) {
 ; VF4-NEXT:  [[ENTRY:.*:]]

