[llvm] f0cdf4b - [InstCombine] Check FPMathOperator for Ctx before FMF check
Anna Thomas via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 7 07:50:35 PST 2023
Author: Anna Thomas
Date: 2023-11-07T10:50:19-05:00
New Revision: f0cdf4b468f6ee48b0d0d51ce78145455e2f07a6
URL: https://github.com/llvm/llvm-project/commit/f0cdf4b468f6ee48b0d0d51ce78145455e2f07a6
DIFF: https://github.com/llvm/llvm-project/commit/f0cdf4b468f6ee48b0d0d51ce78145455e2f07a6.diff
LOG: [InstCombine] Check FPMathOperator for Ctx before FMF check
We need to check that the Ctx instruction is an FPMathOperator before
checking the fast-math flags on this Ctx.
Ctx is not always an FPMathOperator, so explicitly check for it.
Fixes #71548.
Added:
Modified:
llvm/lib/Analysis/InstructionSimplify.cpp
llvm/test/Transforms/InstCombine/minimum.ll
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 5fa0a62c5c9adf1..ad51e163062012e 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6606,7 +6606,8 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// float, if the ninf flag is set.
const APFloat *C;
if (match(Op1, m_APFloat(C)) &&
- (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
+ (C->isInfinity() || (isa<FPMathOperator>(Q.CxtI) &&
+ Q.CxtI->hasNoInfs() && C->isLargest()))) {
// minnum(X, -inf) -> -inf
// maxnum(X, +inf) -> +inf
// minimum(X, -inf) -> -inf if nnan
diff --git a/llvm/test/Transforms/InstCombine/minimum.ll b/llvm/test/Transforms/InstCombine/minimum.ll
index 71e6fec245c6b40..eb6eadf96e1323a 100644
--- a/llvm/test/Transforms/InstCombine/minimum.ll
+++ b/llvm/test/Transforms/InstCombine/minimum.ll
@@ -471,3 +471,29 @@ define double @negated_op_extra_use(double %x) {
%r = call double @llvm.minimum.f64(double %negx, double %x)
ret double %r
}
+
+; Testcase from PR 71548.
+define void @pr71548() {
+; CHECK-LABEL: @pr71548(
+; CHECK-NEXT: [[C0:%.*]] = load atomic double, ptr addrspace(1) null unordered, align 8
+; CHECK-NEXT: [[C1:%.*]] = load atomic i32, ptr addrspace(1) null unordered, align 4
+; CHECK-NEXT: [[C2:%.*]] = sitofp i32 [[C1]] to double
+; CHECK-NEXT: [[CRES_I:%.*]] = call noundef double @llvm.minimum.f64(double [[C0]], double [[C2]])
+; CHECK-NEXT: [[C3:%.*]] = fcmp ult double [[CRES_I]], 0.000000e+00
+; CHECK-NEXT: [[C_NOT16:%.*]] = icmp eq i32 [[C1]], 0
+; CHECK-NEXT: [[COR_COND45:%.*]] = or i1 [[C3]], [[C_NOT16]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[COR_COND45]])
+; CHECK-NEXT: ret void
+;
+ %c0 = load atomic double, ptr addrspace(1) null unordered, align 8
+ %c1 = load atomic i32, ptr addrspace(1) null unordered, align 4
+ %c2 = sitofp i32 %c1 to double
+ %cres.i = call noundef double @llvm.minimum.f64(double %c0, double %c2)
+ %c3 = fcmp ult double %cres.i, 0.000000e+00
+ %c.not16 = icmp eq i32 %c1, 0
+ %cor.cond45 = or i1 %c3, %c.not16
+ call void @llvm.assume(i1 %cor.cond45)
+ ret void
+}
+
+declare void @llvm.assume(i1)
More information about the llvm-commits
mailing list