[llvm-branch-commits] [llvm] 77adbe6 - [SLP] fix fast-math requirements for fmin/fmax reductions
Sanjay Patel via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sun Jan 24 06:15:28 PST 2021
Author: Sanjay Patel
Date: 2021-01-24T08:55:56-05:00
New Revision: 77adbe6a8c716bead04393560ec5aa88877ac1d2
URL: https://github.com/llvm/llvm-project/commit/77adbe6a8c716bead04393560ec5aa88877ac1d2
DIFF: https://github.com/llvm/llvm-project/commit/77adbe6a8c716bead04393560ec5aa88877ac1d2.diff
LOG: [SLP] fix fast-math requirements for fmin/fmax reductions
a6f0221276 enabled intersection of FMF on reduction instructions,
so it is safe to ease the check here.
There is still some room to improve here - it looks like we
have nearly duplicate flags-propagation logic inside the
LoopUtils helper, but that path is limited to targets that do
not form reduction intrinsics (they form the shuffle expansion
instead).
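
As a rough illustration of what "easing the check" means (a
minimal sketch against LLVM's public C++ API, not the in-tree
SLPVectorizer code; the helper name isLegalFPMinMaxReduction is
invented for the example):

// Sketch only; names and structure are illustrative.
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// A maxnum/minnum call can be treated as an associative reduction op as
// long as NaNs are ruled out (nnan); -0.0 does not need to be excluded
// because the intrinsic semantics do not fix a result for it.
static bool isLegalFPMinMaxReduction(const Instruction &I) {
  if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
    Intrinsic::ID ID = II->getIntrinsicID();
    if (ID == Intrinsic::maxnum || ID == Intrinsic::minnum)
      return I.getFastMathFlags().noNaNs();
  }
  return I.isAssociative();
}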
Added:
Modified:
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index c5cfc9e77d8a..7114b4d412fd 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -6422,9 +6422,7 @@ class HorizontalReduction {
// FP min/max are associative except for NaN and -0.0. We do not
// have to rule out -0.0 here because the intrinsic semantics do not
// specify a fixed result for it.
- // TODO: This is artificially restricted to fast because the code that
- // creates reductions assumes/produces fast ops.
- return I->getFastMathFlags().isFast();
+ return I->getFastMathFlags().noNaNs();
}
return I->isAssociative();
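
For context, the FMF intersection from a6f0221276 that makes the
relaxed check above safe works roughly like the following sketch
(assuming FastMathFlags::getFast(), FastMathFlags::operator&=, and
FPMathOperator::getFastMathFlags() from llvm/IR/Operator.h; the
helper name intersectReductionFMF is invented for the example):

// Sketch only; helper name and shape are illustrative, not the in-tree code.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Start from "all flags set" and intersect with every scalar reduction op,
// so the emitted llvm.vector.reduce.* call carries only the flags common
// to the whole chain (e.g. just 'nnan' rather than 'fast').
static FastMathFlags intersectReductionFMF(ArrayRef<Instruction *> RdxOps) {
  FastMathFlags RdxFMF = FastMathFlags::getFast();
  for (Instruction *I : RdxOps)
    if (auto *FPOp = dyn_cast<FPMathOperator>(I))
      RdxFMF &= FPOp->getFastMathFlags();
  return RdxFMF;
}

The emitted reduction call then carries only the intersected flags,
matching the 'call nnan float @llvm.vector.reduce.fmax/fmin' CHECK
lines in the updated tests below.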
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll b/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
index fc134aa6deef..8136f2cb2dfe 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fmaxnum.ll
@@ -361,21 +361,15 @@ define float @reduction_v4f32_fast(float* %p) {
ret float %m3
}
-; TODO: This should become a reduce intrinsic.
-
define float @reduction_v4f32_nnan(float* %p) {
; CHECK-LABEL: @reduction_v4f32_nnan(
; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
-; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
-; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
-; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
-; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
-; CHECK-NEXT: [[M1:%.*]] = tail call nnan float @llvm.maxnum.f32(float [[T1]], float [[T0]])
-; CHECK-NEXT: [[M2:%.*]] = tail call nnan float @llvm.maxnum.f32(float [[T2]], float [[M1]])
-; CHECK-NEXT: [[M3:%.*]] = tail call nnan float @llvm.maxnum.f32(float [[T3]], float [[M2]])
-; CHECK-NEXT: ret float [[M3]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[P]] to <4 x float>*
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[TMP2]])
+; CHECK-NEXT: ret float [[TMP3]]
;
%g1 = getelementptr inbounds float, float* %p, i64 1
%g2 = getelementptr inbounds float, float* %p, i64 2
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll b/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
index e5a4fc235748..470dc8290eee 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fminnum.ll
@@ -361,21 +361,15 @@ define float @reduction_v4f32_fast(float* %p) {
ret float %m3
}
-; TODO: This should become a reduce intrinsic.
-
define float @reduction_v4f32_nnan(float* %p) {
; CHECK-LABEL: @reduction_v4f32_nnan(
; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds float, float* [[P:%.*]], i64 1
; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
-; CHECK-NEXT: [[T0:%.*]] = load float, float* [[P]], align 4
-; CHECK-NEXT: [[T1:%.*]] = load float, float* [[G1]], align 4
-; CHECK-NEXT: [[T2:%.*]] = load float, float* [[G2]], align 4
-; CHECK-NEXT: [[T3:%.*]] = load float, float* [[G3]], align 4
-; CHECK-NEXT: [[M1:%.*]] = tail call nnan float @llvm.minnum.f32(float [[T1]], float [[T0]])
-; CHECK-NEXT: [[M2:%.*]] = tail call nnan float @llvm.minnum.f32(float [[T2]], float [[M1]])
-; CHECK-NEXT: [[M3:%.*]] = tail call nnan float @llvm.minnum.f32(float [[T3]], float [[M2]])
-; CHECK-NEXT: ret float [[M3]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[P]] to <4 x float>*
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP2]])
+; CHECK-NEXT: ret float [[TMP3]]
;
%g1 = getelementptr inbounds float, float* %p, i64 1
%g2 = getelementptr inbounds float, float* %p, i64 2