[llvm-branch-commits] [llvm] f6929c0 - [SLP] add reduction tests for maxnum/minnum intrinsics; NFC
Sanjay Patel via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 22 13:11:47 PST 2020
Author: Sanjay Patel
Date: 2020-12-22T16:05:39-05:00
New Revision: f6929c01952b3f144df620544ed937e801b9c945
URL: https://github.com/llvm/llvm-project/commit/f6929c01952b3f144df620544ed937e801b9c945
DIFF: https://github.com/llvm/llvm-project/commit/f6929c01952b3f144df620544ed937e801b9c945.diff
LOG: [SLP] add reduction tests for maxnum/minnum intrinsics; NFC
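For reference, the scalar pattern exercised by these tests corresponds roughly to the C source below. This is only an illustrative sketch: the tests themselves are hand-written IR, and the exact compiler flags that would reproduce the "fast" call-site flags are an assumption, not part of this commit.

  // Rough C equivalent of the reduction_v4f32_fast pattern (illustrative only;
  // the tests are hand-written IR, not generated from this source).
  // With something like `clang -O2 -ffast-math`, fmaxf calls are typically
  // lowered to llvm.maxnum.f32 with fast-math flags -- that mapping is an
  // assumption here.
  #include <math.h>

  float maxnum_reduction_v4f32(const float *p) {
    float m = fmaxf(p[1], p[0]);   // m1
    m = fmaxf(p[2], m);            // m2
    m = fmaxf(p[3], m);            // m3: a serial max reduction that the SLP
    return m;                      //     vectorizer may turn into a vector op
  }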
Added:
Modified:
llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll b/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
index e03f3f808a4f..23f2196b2425 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
@@ -338,4 +338,151 @@ define void @fmaxnum_16f32() #0 {
ret void
}
+define float @reduction_v4f32_fast(float* %p) {
+; CHECK-LABEL: @reduction_v4f32_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T1]], float [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T2]], float [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T3]], float [[M2]])
+; CHECK-NEXT: ret float [[M3]]
+;
+ %g1 = getelementptr inbounds float, float* %p, i64 1
+ %g2 = getelementptr inbounds float, float* %p, i64 2
+ %g3 = getelementptr inbounds float, float* %p, i64 3
+ %t0 = load float, float* %p, align 4
+ %t1 = load float, float* %g1, align 4
+ %t2 = load float, float* %g2, align 4
+ %t3 = load float, float* %g3, align 4
+ %m1 = tail call fast float @llvm.maxnum.f32(float %t1, float %t0)
+ %m2 = tail call fast float @llvm.maxnum.f32(float %t2, float %m1)
+ %m3 = tail call fast float @llvm.maxnum.f32(float %t3, float %m2)
+ ret float %m3
+}
+
+define float @reduction_v4f32_nnan(float* %p) {
+; CHECK-LABEL: @reduction_v4f32_nnan(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call nnan float @llvm.maxnum.f32(float [[T1]], float [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call nnan float @llvm.maxnum.f32(float [[T2]], float [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call nnan float @llvm.maxnum.f32(float [[T3]], float [[M2]])
+; CHECK-NEXT: ret float [[M3]]
+;
+ %g1 = getelementptr inbounds float, float* %p, i64 1
+ %g2 = getelementptr inbounds float, float* %p, i64 2
+ %g3 = getelementptr inbounds float, float* %p, i64 3
+ %t0 = load float, float* %p, align 4
+ %t1 = load float, float* %g1, align 4
+ %t2 = load float, float* %g2, align 4
+ %t3 = load float, float* %g3, align 4
+ %m1 = tail call nnan float @llvm.maxnum.f32(float %t1, float %t0)
+ %m2 = tail call nnan float @llvm.maxnum.f32(float %t2, float %m1)
+ %m3 = tail call nnan float @llvm.maxnum.f32(float %t3, float %m2)
+ ret float %m3
+}
+
+define float @reduction_v8f32_fast(float* %p) {
+; CHECK-LABEL: @reduction_v8f32_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
+; CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds float, float* [[P]], i64 4
+; CHECK-NEXT: [[G5:%.*]] = getelementptr inbounds float, float* [[P]], i64 5
+; CHECK-NEXT: [[G6:%.*]] = getelementptr inbounds float, float* [[P]], i64 6
+; CHECK-NEXT: [[G7:%.*]] = getelementptr inbounds float, float* [[P]], i64 7
+; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
+; CHECK-NEXT: [[T4:%.*]] = load float, float* [[G4]], align 4
+; CHECK-NEXT: [[T5:%.*]] = load float, float* [[G5]], align 4
+; CHECK-NEXT: [[T6:%.*]] = load float, float* [[G6]], align 4
+; CHECK-NEXT: [[T7:%.*]] = load float, float* [[G7]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T1]], float [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T2]], float [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T3]], float [[M2]])
+; CHECK-NEXT: [[M4:%.*]] = tail call fast float @llvm.maxnum.f32(float [[T4]], float [[M3]])
+; CHECK-NEXT: [[M5:%.*]] = tail call fast float @llvm.maxnum.f32(float [[M4]], float [[T6]])
+; CHECK-NEXT: [[M6:%.*]] = tail call fast float @llvm.maxnum.f32(float [[M5]], float [[T5]])
+; CHECK-NEXT: [[M7:%.*]] = tail call fast float @llvm.maxnum.f32(float [[M6]], float [[T7]])
+; CHECK-NEXT: ret float [[M7]]
+;
+ %g1 = getelementptr inbounds float, float* %p, i64 1
+ %g2 = getelementptr inbounds float, float* %p, i64 2
+ %g3 = getelementptr inbounds float, float* %p, i64 3
+ %g4 = getelementptr inbounds float, float* %p, i64 4
+ %g5 = getelementptr inbounds float, float* %p, i64 5
+ %g6 = getelementptr inbounds float, float* %p, i64 6
+ %g7 = getelementptr inbounds float, float* %p, i64 7
+ %t0 = load float, float* %p, align 4
+ %t1 = load float, float* %g1, align 4
+ %t2 = load float, float* %g2, align 4
+ %t3 = load float, float* %g3, align 4
+ %t4 = load float, float* %g4, align 4
+ %t5 = load float, float* %g5, align 4
+ %t6 = load float, float* %g6, align 4
+ %t7 = load float, float* %g7, align 4
+ %m1 = tail call fast float @llvm.maxnum.f32(float %t1, float %t0)
+ %m2 = tail call fast float @llvm.maxnum.f32(float %t2, float %m1)
+ %m3 = tail call fast float @llvm.maxnum.f32(float %t3, float %m2)
+ %m4 = tail call fast float @llvm.maxnum.f32(float %t4, float %m3)
+ %m5 = tail call fast float @llvm.maxnum.f32(float %m4, float %t6)
+ %m6 = tail call fast float @llvm.maxnum.f32(float %m5, float %t5)
+ %m7 = tail call fast float @llvm.maxnum.f32(float %m6, float %t7)
+ ret float %m7
+}
+
+define double @reduction_v2f64_fast(double* %p) {
+; CHECK-LABEL: @reduction_v2f64_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
+; CHECK-NEXT: [[T0:%.*]] = load double, double* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load double, double* [[G1]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast double @llvm.maxnum.f64(double [[T1]], double [[T0]])
+; CHECK-NEXT: ret double [[M1]]
+;
+ %g1 = getelementptr inbounds double, double* %p, i64 1
+ %t0 = load double, double* %p, align 4
+ %t1 = load double, double* %g1, align 4
+ %m1 = tail call fast double @llvm.maxnum.f64(double %t1, double %t0)
+ ret double %m1
+}
+
+define double @reduction_v4f64_fast(double* %p) {
+; CHECK-LABEL: @reduction_v4f64_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds double, double* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds double, double* [[P]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load double, double* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load double, double* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load double, double* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load double, double* [[G3]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast double @llvm.maxnum.f64(double [[T1]], double [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call fast double @llvm.maxnum.f64(double [[T2]], double [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call fast double @llvm.maxnum.f64(double [[T3]], double [[M2]])
+; CHECK-NEXT: ret double [[M3]]
+;
+ %g1 = getelementptr inbounds double, double* %p, i64 1
+ %g2 = getelementptr inbounds double, double* %p, i64 2
+ %g3 = getelementptr inbounds double, double* %p, i64 3
+ %t0 = load double, double* %p, align 4
+ %t1 = load double, double* %g1, align 4
+ %t2 = load double, double* %g2, align 4
+ %t3 = load double, double* %g3, align 4
+ %m1 = tail call fast double @llvm.maxnum.f64(double %t1, double %t0)
+ %m2 = tail call fast double @llvm.maxnum.f64(double %t2, double %m1)
+ %m3 = tail call fast double @llvm.maxnum.f64(double %t3, double %m2)
+ ret double %m3
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll b/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
index b830d826c2e9..81bcfb2f1e9b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
@@ -338,4 +338,151 @@ define void @fminnum_16f32() #0 {
ret void
}
+define float @reduction_v4f32_fast(float* %p) {
+; CHECK-LABEL: @reduction_v4f32_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast float @llvm.minnum.f32(float [[T1]], float [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call fast float @llvm.minnum.f32(float [[T2]], float [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call fast float @llvm.minnum.f32(float [[T3]], float [[M2]])
+; CHECK-NEXT: ret float [[M3]]
+;
+ %g1 = getelementptr inbounds float, float* %p, i64 1
+ %g2 = getelementptr inbounds float, float* %p, i64 2
+ %g3 = getelementptr inbounds float, float* %p, i64 3
+ %t0 = load float, float* %p, align 4
+ %t1 = load float, float* %g1, align 4
+ %t2 = load float, float* %g2, align 4
+ %t3 = load float, float* %g3, align 4
+ %m1 = tail call fast float @llvm.minnum.f32(float %t1, float %t0)
+ %m2 = tail call fast float @llvm.minnum.f32(float %t2, float %m1)
+ %m3 = tail call fast float @llvm.minnum.f32(float %t3, float %m2)
+ ret float %m3
+}
+
+define float @reduction_v4f32_nnan(float* %p) {
+; CHECK-LABEL: @reduction_v4f32_nnan(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call nnan float @llvm.minnum.f32(float [[T1]], float [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call nnan float @llvm.minnum.f32(float [[T2]], float [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call nnan float @llvm.minnum.f32(float [[T3]], float [[M2]])
+; CHECK-NEXT: ret float [[M3]]
+;
+ %g1 = getelementptr inbounds float, float* %p, i64 1
+ %g2 = getelementptr inbounds float, float* %p, i64 2
+ %g3 = getelementptr inbounds float, float* %p, i64 3
+ %t0 = load float, float* %p, align 4
+ %t1 = load float, float* %g1, align 4
+ %t2 = load float, float* %g2, align 4
+ %t3 = load float, float* %g3, align 4
+ %m1 = tail call nnan float @llvm.minnum.f32(float %t1, float %t0)
+ %m2 = tail call nnan float @llvm.minnum.f32(float %t2, float %m1)
+ %m3 = tail call nnan float @llvm.minnum.f32(float %t3, float %m2)
+ ret float %m3
+}
+
+define float @reduction_v8f32_fast(float* %p) {
+; CHECK-LABEL: @reduction_v8f32_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
+; CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds float, float* [[P]], i64 4
+; CHECK-NEXT: [[G5:%.*]] = getelementptr inbounds float, float* [[P]], i64 5
+; CHECK-NEXT: [[G6:%.*]] = getelementptr inbounds float, float* [[P]], i64 6
+; CHECK-NEXT: [[G7:%.*]] = getelementptr inbounds float, float* [[P]], i64 7
+; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
+; CHECK-NEXT: [[T4:%.*]] = load float, float* [[G4]], align 4
+; CHECK-NEXT: [[T5:%.*]] = load float, float* [[G5]], align 4
+; CHECK-NEXT: [[T6:%.*]] = load float, float* [[G6]], align 4
+; CHECK-NEXT: [[T7:%.*]] = load float, float* [[G7]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast float @llvm.minnum.f32(float [[T1]], float [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call fast float @llvm.minnum.f32(float [[T2]], float [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call fast float @llvm.minnum.f32(float [[T3]], float [[M2]])
+; CHECK-NEXT: [[M4:%.*]] = tail call fast float @llvm.minnum.f32(float [[T4]], float [[M3]])
+; CHECK-NEXT: [[M5:%.*]] = tail call fast float @llvm.minnum.f32(float [[M4]], float [[T6]])
+; CHECK-NEXT: [[M6:%.*]] = tail call fast float @llvm.minnum.f32(float [[M5]], float [[T5]])
+; CHECK-NEXT: [[M7:%.*]] = tail call fast float @llvm.minnum.f32(float [[M6]], float [[T7]])
+; CHECK-NEXT: ret float [[M7]]
+;
+ %g1 = getelementptr inbounds float, float* %p, i64 1
+ %g2 = getelementptr inbounds float, float* %p, i64 2
+ %g3 = getelementptr inbounds float, float* %p, i64 3
+ %g4 = getelementptr inbounds float, float* %p, i64 4
+ %g5 = getelementptr inbounds float, float* %p, i64 5
+ %g6 = getelementptr inbounds float, float* %p, i64 6
+ %g7 = getelementptr inbounds float, float* %p, i64 7
+ %t0 = load float, float* %p, align 4
+ %t1 = load float, float* %g1, align 4
+ %t2 = load float, float* %g2, align 4
+ %t3 = load float, float* %g3, align 4
+ %t4 = load float, float* %g4, align 4
+ %t5 = load float, float* %g5, align 4
+ %t6 = load float, float* %g6, align 4
+ %t7 = load float, float* %g7, align 4
+ %m1 = tail call fast float @llvm.minnum.f32(float %t1, float %t0)
+ %m2 = tail call fast float @llvm.minnum.f32(float %t2, float %m1)
+ %m3 = tail call fast float @llvm.minnum.f32(float %t3, float %m2)
+ %m4 = tail call fast float @llvm.minnum.f32(float %t4, float %m3)
+ %m5 = tail call fast float @llvm.minnum.f32(float %m4, float %t6)
+ %m6 = tail call fast float @llvm.minnum.f32(float %m5, float %t5)
+ %m7 = tail call fast float @llvm.minnum.f32(float %m6, float %t7)
+ ret float %m7
+}
+
+define double @reduction_v2f64_fast(double* %p) {
+; CHECK-LABEL: @reduction_v2f64_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
+; CHECK-NEXT: [[T0:%.*]] = load double, double* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load double, double* [[G1]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast double @llvm.minnum.f64(double [[T1]], double [[T0]])
+; CHECK-NEXT: ret double [[M1]]
+;
+ %g1 = getelementptr inbounds double, double* %p, i64 1
+ %t0 = load double, double* %p, align 4
+ %t1 = load double, double* %g1, align 4
+ %m1 = tail call fast double @llvm.minnum.f64(double %t1, double %t0)
+ ret double %m1
+}
+
+define double @reduction_v4f64_fast(double* %p) {
+; CHECK-LABEL: @reduction_v4f64_fast(
+; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 1
+; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds double, double* [[P]], i64 2
+; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds double, double* [[P]], i64 3
+; CHECK-NEXT: [[T0:%.*]] = load double, double* [[P]], align 4
+; CHECK-NEXT: [[T1:%.*]] = load double, double* [[G1]], align 4
+; CHECK-NEXT: [[T2:%.*]] = load double, double* [[G2]], align 4
+; CHECK-NEXT: [[T3:%.*]] = load double, double* [[G3]], align 4
+; CHECK-NEXT: [[M1:%.*]] = tail call fast double @llvm.minnum.f64(double [[T1]], double [[T0]])
+; CHECK-NEXT: [[M2:%.*]] = tail call fast double @llvm.minnum.f64(double [[T2]], double [[M1]])
+; CHECK-NEXT: [[M3:%.*]] = tail call fast double @llvm.minnum.f64(double [[T3]], double [[M2]])
+; CHECK-NEXT: ret double [[M3]]
+;
+ %g1 = getelementptr inbounds double, double* %p, i64 1
+ %g2 = getelementptr inbounds double, double* %p, i64 2
+ %g3 = getelementptr inbounds double, double* %p, i64 3
+ %t0 = load double, double* %p, align 4
+ %t1 = load double, double* %g1, align 4
+ %t2 = load double, double* %g2, align 4
+ %t3 = load double, double* %g3, align 4
+ %m1 = tail call fast double @llvm.minnum.f64(double %t1, double %t0)
+ %m2 = tail call fast double @llvm.minnum.f64(double %t2, double %m1)
+ %m3 = tail call fast double @llvm.minnum.f64(double %t3, double %m2)
+ ret double %m3
+}
+
attributes #0 = { nounwind }