[llvm] [LV][VPlan] Add initial support for CSA vectorization (PR #106560)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 4 10:44:59 PDT 2024


================
@@ -0,0 +1,3091 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -passes=loop-vectorize -force-tail-folding-style=data-with-evl \
+; RUN:   -enable-csa-vectorization -scalable-vectorization=on \
+; RUN:   -force-target-supports-scalable-vectors -force-target-instruction-cost=1 \
+; RUN:   | FileCheck %s -check-prefix=EVL
+; RUN: opt < %s -S -passes=loop-vectorize -force-tail-folding-style=none \
+; RUN:   -enable-csa-vectorization -scalable-vectorization=on \
+; RUN:   -force-target-supports-scalable-vectors -force-target-instruction-cost=1 \
+; RUN:   | FileCheck %s -check-prefix=NO-EVL
+; RUN: opt < %s -S -passes=loop-vectorize -force-tail-folding-style=data \
+; RUN:   -enable-csa-vectorization -scalable-vectorization=on \
+; RUN:   -force-target-supports-scalable-vectors -force-target-instruction-cost=1 \
+; RUN:   | FileCheck %s -check-prefix=DATA
+
+; This function is generated from the following C/C++ program:
+; int simple_csa_int_select(int N, int *data, int64_t a) {
+;   int t = -1;
+;   for (int i = 0; i < N; i++) {
+;     if (a < data[i])
+;       t = data[i];
+;   }
+;   return t; // use t
+; }
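+;
+; Note on the expected vectorization (as encoded by the checks below): the
+; vector loop carries a mask phi recording which lanes satisfied the condition
+; in the most recent iteration where any lane did, and a data phi holding the
+; loads from that same iteration; both are updated with selects whenever any
+; lane of the current iteration is active. The middle block uses a
+; stepvector/umax reduction over the final mask to pick an extraction index,
+; and falls back to the initial value -1 when that index is negative (no lane
+; was ever active).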
+define i32 @simple_csa_int_select(i32 %N, ptr %data, i64 %a) {
+; EVL-LABEL: @simple_csa_int_select(
+; EVL-NEXT:  entry:
+; EVL-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; EVL-NEXT:    br i1 [[CMP9]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; EVL:       for.body.preheader:
+; EVL-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
+; EVL-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; EVL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP0]]
+; EVL-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; EVL:       vector.ph:
+; EVL-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; EVL-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; EVL-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; EVL-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; EVL-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[A:%.*]], i64 0
+; EVL-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+; EVL-NEXT:    br label [[VECTOR_BODY:%.*]]
+; EVL:       vector.body:
+; EVL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; EVL-NEXT:    [[CSA_MASK_PHI:%.*]] = phi <vscale x 1 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[CSA_MASK_SEL:%.*]], [[VECTOR_BODY]] ]
+; EVL-NEXT:    [[CSA_DATA_PHI:%.*]] = phi <vscale x 1 x i32> [ poison, [[VECTOR_PH]] ], [ [[CSA_DATA_SEL:%.*]], [[VECTOR_BODY]] ]
+; EVL-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; EVL-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 [[TMP3]]
+; EVL-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
+; EVL-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
+; EVL-NEXT:    [[TMP6:%.*]] = sext <vscale x 1 x i32> [[WIDE_LOAD]] to <vscale x 1 x i64>
+; EVL-NEXT:    [[TMP7:%.*]] = icmp slt <vscale x 1 x i64> [[BROADCAST_SPLAT]], [[TMP6]]
+; EVL-NEXT:    [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1> [[TMP7]])
+; EVL-NEXT:    [[CSA_MASK_SEL]] = select i1 [[TMP8]], <vscale x 1 x i1> [[TMP7]], <vscale x 1 x i1> [[CSA_MASK_PHI]]
+; EVL-NEXT:    [[CSA_DATA_SEL]] = select i1 [[TMP8]], <vscale x 1 x i32> [[WIDE_LOAD]], <vscale x 1 x i32> [[CSA_DATA_PHI]]
+; EVL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; EVL-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; EVL-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; EVL:       middle.block:
+; EVL-NEXT:    [[CSA_STEP:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; EVL-NEXT:    [[TMP10:%.*]] = select <vscale x 1 x i1> [[CSA_MASK_SEL]], <vscale x 1 x i32> [[CSA_STEP]], <vscale x 1 x i32> zeroinitializer
+; EVL-NEXT:    [[TMP11:%.*]] = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> [[TMP10]])
+; EVL-NEXT:    [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[CSA_MASK_SEL]], i64 0
+; EVL-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP11]], 0
+; EVL-NEXT:    [[TMP14:%.*]] = and i1 [[TMP12]], [[TMP13]]
+; EVL-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 0, i32 -1
+; EVL-NEXT:    [[CSA_EXTRACT:%.*]] = extractelement <vscale x 1 x i32> [[CSA_DATA_SEL]], i32 [[TMP15]]
+; EVL-NEXT:    [[TMP16:%.*]] = icmp sge i32 [[TMP15]], 0
+; EVL-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[CSA_EXTRACT]], i32 -1
+; EVL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; EVL-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; EVL:       scalar.ph:
+; EVL-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; EVL-NEXT:    br label [[FOR_BODY:%.*]]
+; EVL:       for.cond.cleanup.loopexit:
+; EVL-NEXT:    [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT:%.*]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; EVL-NEXT:    br label [[FOR_COND_CLEANUP]]
+; EVL:       for.cond.cleanup:
+; EVL-NEXT:    [[T_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[SPEC_SELECT_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; EVL-NEXT:    ret i32 [[T_0_LCSSA]]
+; EVL:       for.body:
+; EVL-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; EVL-NEXT:    [[T_010:%.*]] = phi i32 [ -1, [[SCALAR_PH]] ], [ [[SPEC_SELECT]], [[FOR_BODY]] ]
+; EVL-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[INDVARS_IV]]
+; EVL-NEXT:    [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; EVL-NEXT:    [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
+; EVL-NEXT:    [[CMP1:%.*]] = icmp slt i64 [[A]], [[TMP19]]
+; EVL-NEXT:    [[SPEC_SELECT]] = select i1 [[CMP1]], i32 [[TMP18]], i32 [[T_010]]
+; EVL-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; EVL-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; EVL-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; NO-EVL-LABEL: @simple_csa_int_select(
+; NO-EVL-NEXT:  entry:
+; NO-EVL-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; NO-EVL-NEXT:    br i1 [[CMP9]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; NO-EVL:       for.body.preheader:
+; NO-EVL-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
+; NO-EVL-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-EVL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP0]]
+; NO-EVL-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-EVL:       vector.ph:
+; NO-EVL-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; NO-EVL-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; NO-EVL-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; NO-EVL-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-EVL-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[A:%.*]], i64 0
+; NO-EVL-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+; NO-EVL-NEXT:    br label [[VECTOR_BODY:%.*]]
+; NO-EVL:       vector.body:
+; NO-EVL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-EVL-NEXT:    [[CSA_MASK_PHI:%.*]] = phi <vscale x 1 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[CSA_MASK_SEL:%.*]], [[VECTOR_BODY]] ]
+; NO-EVL-NEXT:    [[CSA_DATA_PHI:%.*]] = phi <vscale x 1 x i32> [ poison, [[VECTOR_PH]] ], [ [[CSA_DATA_SEL:%.*]], [[VECTOR_BODY]] ]
+; NO-EVL-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; NO-EVL-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 [[TMP3]]
+; NO-EVL-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
+; NO-EVL-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
+; NO-EVL-NEXT:    [[TMP6:%.*]] = sext <vscale x 1 x i32> [[WIDE_LOAD]] to <vscale x 1 x i64>
+; NO-EVL-NEXT:    [[TMP7:%.*]] = icmp slt <vscale x 1 x i64> [[BROADCAST_SPLAT]], [[TMP6]]
+; NO-EVL-NEXT:    [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1> [[TMP7]])
+; NO-EVL-NEXT:    [[CSA_MASK_SEL]] = select i1 [[TMP8]], <vscale x 1 x i1> [[TMP7]], <vscale x 1 x i1> [[CSA_MASK_PHI]]
+; NO-EVL-NEXT:    [[CSA_DATA_SEL]] = select i1 [[TMP8]], <vscale x 1 x i32> [[WIDE_LOAD]], <vscale x 1 x i32> [[CSA_DATA_PHI]]
+; NO-EVL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; NO-EVL-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-EVL-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-EVL:       middle.block:
+; NO-EVL-NEXT:    [[CSA_STEP:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; NO-EVL-NEXT:    [[TMP10:%.*]] = select <vscale x 1 x i1> [[CSA_MASK_SEL]], <vscale x 1 x i32> [[CSA_STEP]], <vscale x 1 x i32> zeroinitializer
+; NO-EVL-NEXT:    [[TMP11:%.*]] = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> [[TMP10]])
+; NO-EVL-NEXT:    [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[CSA_MASK_SEL]], i64 0
+; NO-EVL-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP11]], 0
+; NO-EVL-NEXT:    [[TMP14:%.*]] = and i1 [[TMP12]], [[TMP13]]
+; NO-EVL-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 0, i32 -1
+; NO-EVL-NEXT:    [[CSA_EXTRACT:%.*]] = extractelement <vscale x 1 x i32> [[CSA_DATA_SEL]], i32 [[TMP15]]
+; NO-EVL-NEXT:    [[TMP16:%.*]] = icmp sge i32 [[TMP15]], 0
+; NO-EVL-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[CSA_EXTRACT]], i32 -1
+; NO-EVL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; NO-EVL-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; NO-EVL:       scalar.ph:
+; NO-EVL-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; NO-EVL-NEXT:    br label [[FOR_BODY:%.*]]
+; NO-EVL:       for.cond.cleanup.loopexit:
+; NO-EVL-NEXT:    [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT:%.*]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; NO-EVL-NEXT:    br label [[FOR_COND_CLEANUP]]
+; NO-EVL:       for.cond.cleanup:
+; NO-EVL-NEXT:    [[T_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[SPEC_SELECT_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; NO-EVL-NEXT:    ret i32 [[T_0_LCSSA]]
+; NO-EVL:       for.body:
+; NO-EVL-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-EVL-NEXT:    [[T_010:%.*]] = phi i32 [ -1, [[SCALAR_PH]] ], [ [[SPEC_SELECT]], [[FOR_BODY]] ]
+; NO-EVL-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[INDVARS_IV]]
+; NO-EVL-NEXT:    [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-EVL-NEXT:    [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
+; NO-EVL-NEXT:    [[CMP1:%.*]] = icmp slt i64 [[A]], [[TMP19]]
+; NO-EVL-NEXT:    [[SPEC_SELECT]] = select i1 [[CMP1]], i32 [[TMP18]], i32 [[T_010]]
+; NO-EVL-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-EVL-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; NO-EVL-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; DATA-LABEL: @simple_csa_int_select(
+; DATA-NEXT:  entry:
+; DATA-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; DATA-NEXT:    br i1 [[CMP9]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DATA:       for.body.preheader:
+; DATA-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
+; DATA-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; DATA-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP0]]
+; DATA-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DATA:       vector.ph:
+; DATA-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; DATA-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; DATA-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; DATA-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; DATA-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[A:%.*]], i64 0
+; DATA-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+; DATA-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DATA:       vector.body:
+; DATA-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DATA-NEXT:    [[CSA_MASK_PHI:%.*]] = phi <vscale x 1 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[CSA_MASK_SEL:%.*]], [[VECTOR_BODY]] ]
+; DATA-NEXT:    [[CSA_DATA_PHI:%.*]] = phi <vscale x 1 x i32> [ poison, [[VECTOR_PH]] ], [ [[CSA_DATA_SEL:%.*]], [[VECTOR_BODY]] ]
+; DATA-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; DATA-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 [[TMP3]]
+; DATA-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
+; DATA-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
+; DATA-NEXT:    [[TMP6:%.*]] = sext <vscale x 1 x i32> [[WIDE_LOAD]] to <vscale x 1 x i64>
+; DATA-NEXT:    [[TMP7:%.*]] = icmp slt <vscale x 1 x i64> [[BROADCAST_SPLAT]], [[TMP6]]
+; DATA-NEXT:    [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1> [[TMP7]])
+; DATA-NEXT:    [[CSA_MASK_SEL]] = select i1 [[TMP8]], <vscale x 1 x i1> [[TMP7]], <vscale x 1 x i1> [[CSA_MASK_PHI]]
+; DATA-NEXT:    [[CSA_DATA_SEL]] = select i1 [[TMP8]], <vscale x 1 x i32> [[WIDE_LOAD]], <vscale x 1 x i32> [[CSA_DATA_PHI]]
+; DATA-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; DATA-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DATA-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; DATA:       middle.block:
+; DATA-NEXT:    [[CSA_STEP:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; DATA-NEXT:    [[TMP10:%.*]] = select <vscale x 1 x i1> [[CSA_MASK_SEL]], <vscale x 1 x i32> [[CSA_STEP]], <vscale x 1 x i32> zeroinitializer
+; DATA-NEXT:    [[TMP11:%.*]] = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> [[TMP10]])
+; DATA-NEXT:    [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[CSA_MASK_SEL]], i64 0
+; DATA-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP11]], 0
+; DATA-NEXT:    [[TMP14:%.*]] = and i1 [[TMP12]], [[TMP13]]
+; DATA-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 0, i32 -1
+; DATA-NEXT:    [[CSA_EXTRACT:%.*]] = extractelement <vscale x 1 x i32> [[CSA_DATA_SEL]], i32 [[TMP15]]
+; DATA-NEXT:    [[TMP16:%.*]] = icmp sge i32 [[TMP15]], 0
+; DATA-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[CSA_EXTRACT]], i32 -1
+; DATA-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; DATA-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; DATA:       scalar.ph:
+; DATA-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; DATA-NEXT:    br label [[FOR_BODY:%.*]]
+; DATA:       for.cond.cleanup.loopexit:
+; DATA-NEXT:    [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT:%.*]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; DATA-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DATA:       for.cond.cleanup:
+; DATA-NEXT:    [[T_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[SPEC_SELECT_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; DATA-NEXT:    ret i32 [[T_0_LCSSA]]
+; DATA:       for.body:
+; DATA-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DATA-NEXT:    [[T_010:%.*]] = phi i32 [ -1, [[SCALAR_PH]] ], [ [[SPEC_SELECT]], [[FOR_BODY]] ]
+; DATA-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[INDVARS_IV]]
+; DATA-NEXT:    [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; DATA-NEXT:    [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
+; DATA-NEXT:    [[CMP1:%.*]] = icmp slt i64 [[A]], [[TMP19]]
+; DATA-NEXT:    [[SPEC_SELECT]] = select i1 [[CMP1]], i32 [[TMP18]], i32 [[T_010]]
+; DATA-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DATA-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DATA-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+entry:
+  %cmp9 = icmp sgt i32 %N, 0
+  br i1 %cmp9, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  %spec.select.lcssa = phi i32 [ %spec.select, %for.body ]
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  %t.0.lcssa = phi i32 [ -1, %entry ], [ %spec.select.lcssa, %for.cond.cleanup.loopexit ]
+  ret i32 %t.0.lcssa
----------------
artagnon wrote:

Makes sense. Do keep in mind, however, that clang isn't the only frontend to LLVM, and other frontends (Rust, Swift, etc.) might not run the optimization pipeline the same way clang does: please ensure coverage of those code paths as well.
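For example, a pipeline that has not folded the conditional update into a select might hand the vectorizer the same idiom expressed as control flow, roughly like this (an illustrative sketch, not taken from the patch; the name and exact shape are made up):

```llvm
; Same CSA pattern as @simple_csa_int_select, but with the conditional
; assignment kept as a branch + phi rather than a select.
define i32 @csa_int_branch(i32 %N, ptr %data, i64 %a) {
entry:
  %wide.trip.count = zext i32 %N to i64
  %cmp = icmp sgt i32 %N, 0
  br i1 %cmp, label %for.body, label %exit

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
  %t = phi i32 [ -1, %entry ], [ %t.next, %latch ]
  %arrayidx = getelementptr inbounds i32, ptr %data, i64 %iv
  %val = load i32, ptr %arrayidx, align 4
  %val.ext = sext i32 %val to i64
  %cmp1 = icmp slt i64 %a, %val.ext
  br i1 %cmp1, label %if.then, label %latch

if.then:                                          ; update taken on this path
  br label %latch

latch:
  %t.next = phi i32 [ %val, %if.then ], [ %t, %for.body ]
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %wide.trip.count
  br i1 %exitcond, label %exit, label %for.body

exit:
  %res = phi i32 [ -1, %entry ], [ %t.next, %latch ]
  ret i32 %res
}
```

A test along those lines would also make sure the recognition doesn't quietly depend on the select-based canonical form that clang's pipeline happens to produce.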

https://github.com/llvm/llvm-project/pull/106560

