[llvm] ab9178e - [ARM] Add a couple of new MVE reduction tests. NFC
David Green via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 8 06:32:11 PST 2024
Author: David Green
Date: 2024-11-08T14:32:06Z
New Revision: ab9178e3e73dc715463e1019ed2cd449dc18bb18
URL: https://github.com/llvm/llvm-project/commit/ab9178e3e73dc715463e1019ed2cd449dc18bb18
DIFF: https://github.com/llvm/llvm-project/commit/ab9178e3e73dc715463e1019ed2cd449dc18bb18.diff
LOG: [ARM] Add a couple of new MVE reduction tests. NFC
Nowadays we generate add(zext(mul(sext, sext))) with an nneg zext, and the
multi-use test is awkward to get right. This should help our test coverage
through the VPlan cost model transition.
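For reference, loops of roughly the following shape produce that pattern (a
hypothetical C sketch of what the new IR tests correspond to; the function
names and exact source are assumptions, not taken from the commit):

/* Square-and-accumulate: mul(sext, sext) on i16 inputs, with the i32
   product widened to the i64 accumulator via a zext that is nneg
   because a square is never negative. */
long long mla_xx_sext_zext(const short *x, int n) {
  long long s = 0;
  for (int i = 0; i < n; i++)
    s += (int)x[i] * x[i];
  return s;
}

/* Multi-use variant: the sign-extended load feeds both the multiply
   and a second, plain i32 reduction. */
long long mla_and_add_together_16_64(const short *x, int n) {
  long long t = 0;
  int s = 0;
  for (int i = 0; i < n; i++) {
    int c = x[i];   /* sext i16 -> i32, used twice */
    t += c * c;     /* widened via zext nneg to i64 */
    s += c;         /* keeps the sext multi-use */
  }
  return s / t;
}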
Added:
Modified:
llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index a7cb5c61ca5502..3dae408feeed7f 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -1445,4 +1445,224 @@ for.cond.cleanup: ; preds = %for.body, %entry
ret i32 %r.0.lcssa
}
+define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
+; CHECK-LABEL: @mla_xx_sext_zext(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP9]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.body.preheader:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i32> [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext nneg <8 x i32> [[TMP2]] to <8 x i64>
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
+; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[S_0_LCSSA]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[S_010:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_011]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
+; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
+; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[S_010]], [[CONV3]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+;
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ %s.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
+ ret i64 %s.0.lcssa
+
+for.body: ; preds = %entry, %for.body
+ %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %s.010 = phi i64 [ %add, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.011
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv
+ %conv3 = zext nneg i32 %mul to i64
+ %add = add nuw nsw i64 %s.010, %conv3
+ %inc = add nuw nsw i32 %i.011, 1
+ %exitcond.not = icmp eq i32 %inc, %n
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 noundef %n) #0 {
+; CHECK-LABEL: @mla_and_add_together_16_64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP16:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP16]])
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i32 [[N]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i32> [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext nneg <8 x i32> [[TMP2]] to <8 x i64>
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
+; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD6_LCSSA:%.*]] = phi i32 [ [[ADD6:%.*]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[CONV7:%.*]] = sext i32 [[ADD6_LCSSA]] to i64
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[CONV7]], [[ADD_LCSSA]]
+; CHECK-NEXT: ret i64 [[DIV]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_019:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[T_018:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[S_017:%.*]] = phi i32 [ [[ADD6]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_019]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
+; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
+; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[T_018]], [[CONV3]]
+; CHECK-NEXT: [[ADD6]] = add nsw i32 [[S_017]], [[CONV]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_019]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+;
+entry:
+ %cmp16 = icmp sgt i32 %n, 0
+ tail call void @llvm.assume(i1 %cmp16)
+ br label %for.body
+
+for.cond.cleanup:
+ %conv7 = sext i32 %add6 to i64
+ %div = sdiv i64 %conv7, %add
+ ret i64 %div
+
+for.body:
+ %i.019 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %t.018 = phi i64 [ %add, %for.body ], [ 0, %entry ]
+ %s.017 = phi i32 [ %add6, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.019
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv
+ %conv3 = zext nneg i32 %mul to i64
+ %add = add nuw nsw i64 %t.018, %conv3
+ %add6 = add nsw i32 %s.017, %conv
+ %inc = add nuw nsw i32 %i.019, 1
+ %exitcond.not = icmp eq i32 %inc, %n
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+define i64 @interleave_doublereduct_i16_i64(ptr %x, ptr %y, i32 %n) {
+; CHECK-LABEL: @interleave_doublereduct_i16_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP23:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP23]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: [[T_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD12:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: ret i64 [[T_0_LCSSA]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_025:%.*]] = phi i32 [ [[ADD13:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[T_024:%.*]] = phi i64 [ [[ADD12]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[I_025]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[I_025]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
+; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
+; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[T_024]], [[CONV3]]
+; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_025]], 1
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[ADD4]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
+; CHECK-NEXT: [[CONV6:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[Y]], i32 [[ADD4]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
+; CHECK-NEXT: [[CONV9:%.*]] = sext i16 [[TMP3]] to i32
+; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[CONV9]], [[CONV6]]
+; CHECK-NEXT: [[CONV11:%.*]] = sext i32 [[MUL10]] to i64
+; CHECK-NEXT: [[ADD12]] = add nsw i64 [[ADD]], [[CONV11]]
+; CHECK-NEXT: [[ADD13]] = add nuw nsw i32 [[I_025]], 2
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD13]], [[N]]
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
+;
+entry:
+ %cmp23 = icmp sgt i32 %n, 0
+ br i1 %cmp23, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ %t.0.lcssa = phi i64 [ 0, %entry ], [ %add12, %for.body ]
+ ret i64 %t.0.lcssa
+
+for.body:
+ %i.025 = phi i32 [ %add13, %for.body ], [ 0, %entry ]
+ %t.024 = phi i64 [ %add12, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.025
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %0 to i32
+ %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.025
+ %1 = load i16, ptr %arrayidx1, align 2
+ %conv2 = sext i16 %1 to i32
+ %mul = mul nsw i32 %conv2, %conv
+ %conv3 = sext i32 %mul to i64
+ %add = add nsw i64 %t.024, %conv3
+ %add4 = or disjoint i32 %i.025, 1
+ %arrayidx5 = getelementptr inbounds i16, ptr %x, i32 %add4
+ %2 = load i16, ptr %arrayidx5, align 2
+ %conv6 = sext i16 %2 to i32
+ %arrayidx8 = getelementptr inbounds i16, ptr %y, i32 %add4
+ %3 = load i16, ptr %arrayidx8, align 2
+ %conv9 = sext i16 %3 to i32
+ %mul10 = mul nsw i32 %conv9, %conv6
+ %conv11 = sext i32 %mul10 to i64
+ %add12 = add nsw i64 %add, %conv11
+ %add13 = add nuw nsw i32 %i.025, 2
+ %cmp = icmp slt i32 %add13, %n
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+
attributes #0 = { "target-features"="+mve" }