[llvm-branch-commits] [llvm] InstCombine: Handle fptrunc in SimplifyDemandedFPClass (PR #175421)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sun Jan 11 01:23:41 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-transforms

Author: Matt Arsenault (arsenm)

<details>
<summary>Changes</summary>

Also handle llvm.fptrunc.round, since it is simplified the same way as fptrunc.
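
For context, a minimal sketch of the kind of fold this enables, adapted from the updated fptrunc.round test in the diff below; the plain-fptrunc function name here is illustrative and not taken from the truncated test file:

```llvm
; Only nan is demanded from the result (every other class is excluded by the
; return attribute), and fptrunc cannot create a NaN from a non-NaN source,
; so the truncation folds to a quiet NaN constant.
define nofpclass(inf norm sub zero) half @only_nan_demanded(float %x) {
  %result = fptrunc float %x to half
  ; After this patch, InstCombine folds this to: ret half 0xH7E00
  ret half %result
}
```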

---

Patch is 28.94 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/175421.diff


6 Files Affected:

- (modified) llvm/include/llvm/Support/KnownFPClass.h (+3) 
- (modified) llvm/lib/Analysis/ValueTracking.cpp (+1-9) 
- (modified) llvm/lib/Support/KnownFPClass.cpp (+14) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp (+140) 
- (modified) llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc-round.ll (+15-28) 
- (modified) llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc.ll (+15-30) 


``````````diff
diff --git a/llvm/include/llvm/Support/KnownFPClass.h b/llvm/include/llvm/Support/KnownFPClass.h
index 2348927fd9b9a..5f5744df3cfb5 100644
--- a/llvm/include/llvm/Support/KnownFPClass.h
+++ b/llvm/include/llvm/Support/KnownFPClass.h
@@ -295,6 +295,9 @@ struct KnownFPClass {
                                      const fltSemantics &DstTy,
                                      const fltSemantics &SrcTy);
 
+  /// Propagate known class for fptrunc.
+  static LLVM_ABI KnownFPClass fptrunc(const KnownFPClass &KnownSrc);
+
   void resetAll() { *this = KnownFPClass(); }
 };
 
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 0c76522efdc29..357d7bc8d3664 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -4905,15 +4905,7 @@ static void computeKnownFPClassForFPTrunc(const Operator *Op,
   KnownFPClass KnownSrc;
   computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
                       KnownSrc, Q, Depth + 1);
-
-  // Sign should be preserved
-  // TODO: Handle cannot be ordered greater than zero
-  if (KnownSrc.cannotBeOrderedLessThanZero())
-    Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
-
-  Known.propagateNaN(KnownSrc, true);
-
-  // Infinity needs a range check.
+  Known = KnownFPClass::fptrunc(KnownSrc);
 }
 
 static constexpr KnownFPClass::MinMaxKind getMinMaxKind(Intrinsic::ID IID) {
diff --git a/llvm/lib/Support/KnownFPClass.cpp b/llvm/lib/Support/KnownFPClass.cpp
index 7a60221f7e8f8..1e8f74ada4661 100644
--- a/llvm/lib/Support/KnownFPClass.cpp
+++ b/llvm/lib/Support/KnownFPClass.cpp
@@ -351,3 +351,17 @@ KnownFPClass KnownFPClass::fpext(const KnownFPClass &KnownSrc,
 
   return Known;
 }
+
+KnownFPClass KnownFPClass::fptrunc(const KnownFPClass &KnownSrc) {
+  KnownFPClass Known;
+
+  // Sign should be preserved
+  // TODO: Handle cannot be ordered greater than zero
+  if (KnownSrc.cannotBeOrderedLessThanZero())
+    Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+  Known.propagateNaN(KnownSrc, true);
+
+  // Infinity needs a range check.
+  return Known;
+}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 92b4bfe4ec54e..91ab7c75b47b7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -2033,6 +2033,36 @@ static Constant *getFPClassConstant(Type *Ty, FPClassTest Mask,
   }
 }
 
+static Value *simplifyDemandedUseFPClassFPTrunc(InstCombinerImpl &IC,
+                                                Instruction &I,
+                                                FPClassTest DemandedMask,
+                                                KnownFPClass &Known,
+                                                unsigned Depth) {
+  FPClassTest SrcDemandedMask = DemandedMask;
+
+  // Zero results may have been rounded from subnormal sources.
+  if (DemandedMask & fcNegZero)
+    SrcDemandedMask |= fcNegSubnormal;
+  if (DemandedMask & fcPosZero)
+    SrcDemandedMask |= fcPosSubnormal;
+
+  // Subnormal results may have been normal in the source type
+  if (DemandedMask & fcNegSubnormal)
+    SrcDemandedMask |= fcNegNormal;
+  if (DemandedMask & fcPosSubnormal)
+    SrcDemandedMask |= fcPosNormal;
+
+  KnownFPClass KnownSrc;
+  if (IC.SimplifyDemandedFPClass(&I, 0, SrcDemandedMask, KnownSrc, Depth + 1))
+    return &I;
+
+  Known = KnownFPClass::fptrunc(KnownSrc);
+
+  FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses;
+  return getFPClassConstant(I.getType(), ValidResults,
+                            /*IsCanonicalizing=*/true);
+}
+
 Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Instruction *I,
                                                     FPClassTest DemandedMask,
                                                     KnownFPClass &Known,
@@ -2218,6 +2248,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Instruction *I,
     FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses;
     return getFPClassConstant(VTy, ValidResults, /*IsCanonicalizing=*/true);
   }
+  case Instruction::FPTrunc:
+    return simplifyDemandedUseFPClassFPTrunc(*this, *I, DemandedMask, Known,
+                                             Depth);
   case Instruction::FPExt: {
     FPClassTest SrcDemandedMask = DemandedMask;
 
@@ -2600,6 +2633,113 @@ Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Instruction *I,
       FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses;
       return getFPClassConstant(VTy, ValidResults, /*IsCanonicalizing=*/true);
     }
+    case Intrinsic::sqrt: {
+      FPClassTest DemandedSrcMask =
+          DemandedMask & (fcNegZero | fcPositive | fcNan);
+
+      if (DemandedMask & fcNan)
+        DemandedSrcMask |= (fcNegative & ~fcNegZero);
+
+      // sqrt(max_subnormal) is a normal value
+      if (DemandedMask & fcPosNormal)
+        DemandedSrcMask |= fcPosSubnormal;
+
+      KnownFPClass KnownSrc;
+      if (SimplifyDemandedFPClass(I, 0, DemandedSrcMask, KnownSrc, Depth + 1))
+        return I;
+
+      Type *EltTy = VTy->getScalarType();
+      DenormalMode Mode = F.getDenormalMode(EltTy->getFltSemantics());
+
+      // sqrt(-x) = nan, but be careful of negative subnormals flushed to 0.
+      if (KnownSrc.isKnownNever(fcPositive) &&
+          KnownSrc.isKnownNeverLogicalZero(Mode))
+        return ConstantFP::getQNaN(VTy);
+
+      Known = KnownFPClass::sqrt(KnownSrc, Mode);
+      FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses;
+
+      if (ValidResults == fcZero) {
+        if (FMF.noSignedZeros())
+          return ConstantFP::getZero(VTy);
+
+        Value *Copysign = Builder.CreateCopySign(ConstantFP::getZero(VTy),
+                                                 CI->getArgOperand(0), FMF);
+        Copysign->takeName(CI);
+        return Copysign;
+      }
+
+      return getFPClassConstant(VTy, ValidResults, /*IsCanonicalizing=*/true);
+    }
+    case Intrinsic::trunc:
+    case Intrinsic::floor:
+    case Intrinsic::ceil:
+    case Intrinsic::rint:
+    case Intrinsic::nearbyint:
+    case Intrinsic::round:
+    case Intrinsic::roundeven: {
+      FPClassTest DemandedSrcMask = DemandedMask;
+
+      // Zero results imply valid subnormal sources.
+      if (DemandedMask & fcNegZero)
+        DemandedSrcMask |= fcNegSubnormal;
+
+      if (DemandedMask & fcPosZero)
+        DemandedSrcMask |= fcPosSubnormal;
+
+      KnownFPClass KnownSrc;
+      if (SimplifyDemandedFPClass(CI, 0, DemandedSrcMask, KnownSrc, Depth + 1))
+        return I;
+
+      // Note: Possibly dropping snan quiet.
+      if (KnownSrc.isKnownAlways(fcInf | fcNan))
+        return CI->getArgOperand(0);
+
+      // Propagate nnan-ness to source to simplify source checks.
+      if ((DemandedMask & fcNan) == fcNone)
+        KnownSrc.knownNot(fcNan);
+
+      bool IsRoundNearest =
+          IID == Intrinsic::round || IID == Intrinsic::roundeven ||
+          IID == Intrinsic::nearbyint || IID == Intrinsic::rint;
+
+      // Ignore denormals-as-zero, as canonicalization is not mandated.
+      if ((IID == Intrinsic::trunc || IID == Intrinsic::floor ||
+           IsRoundNearest) &&
+          (KnownSrc.isKnownAlways(fcPosZero | fcPosSubnormal)))
+        return ConstantFP::getZero(VTy);
+
+      if ((IID == Intrinsic::trunc || IsRoundNearest) &&
+          KnownSrc.isKnownAlways(fcNegZero | fcNegSubnormal))
+        return ConstantFP::getZero(VTy, true);
+
+      if (IID == Intrinsic::floor && KnownSrc.isKnownAlways(fcNegSubnormal))
+        return ConstantFP::get(VTy, -1.0);
+
+      if (IID == Intrinsic::ceil && KnownSrc.isKnownAlways(fcPosSubnormal))
+        return ConstantFP::get(VTy, 1.0);
+
+      Known = KnownFPClass::roundToIntegral(KnownSrc, IID == Intrinsic::trunc,
+                                            VTy->isMultiUnitFPType());
+
+      FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses;
+      if (Constant *SingleVal =
+              getFPClassConstant(VTy, ValidResults, /*IsCanonicalizing=*/true))
+        return SingleVal;
+
+      if ((IID == Intrinsic::trunc || IsRoundNearest) &&
+          KnownSrc.isKnownAlways(fcZero | fcSubnormal)) {
+        Value *Copysign = Builder.CreateCopySign(ConstantFP::getZero(VTy),
+                                                 CI->getArgOperand(0));
+        Copysign->takeName(CI);
+        return Copysign;
+      }
+
+      return nullptr;
+    }
+    case Intrinsic::fptrunc_round:
+      return simplifyDemandedUseFPClassFPTrunc(*this, *CI, DemandedMask, Known,
+                                               Depth);
     case Intrinsic::canonicalize: {
       Type *EltTy = VTy->getScalarType();
 
diff --git a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc-round.ll b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc-round.ll
index 1997da17e18e0..d08d947294047 100644
--- a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc-round.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc-round.ll
@@ -50,8 +50,7 @@ define nofpclass(inf norm sub zero snan) half @ret_only_qnan__fptrunc(float %x)
 define nofpclass(inf norm sub zero) half @ret_only_nan__fptrunc(float %x) {
 ; CHECK-LABEL: define nofpclass(inf zero sub norm) half @ret_only_nan__fptrunc(
 ; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[X]], metadata !"round.downward")
-; CHECK-NEXT:    ret half [[RESULT]]
+; CHECK-NEXT:    ret half 0xH7E00
 ;
   %result = call half @llvm.fptrunc.round.f16.f32(float %x, metadata !"round.downward")
   ret half %result
@@ -118,8 +117,7 @@ define nofpclass(nan) half @ret_no_nan__fptrunc__select_nan_or_unknown(i1 %cond,
 ; CHECK-LABEL: define nofpclass(nan) half @ret_no_nan__fptrunc__select_nan_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[NAN:%.*]] = call float @returns_nan_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[NAN]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %nan = call float @returns_nan_f32()
@@ -133,8 +131,7 @@ define nofpclass(pinf) half @ret_no_pinf__fptrunc__select_pinf_or_unknown(i1 %co
 ; CHECK-LABEL: define nofpclass(pinf) half @ret_no_pinf__fptrunc__select_pinf_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[PINF:%.*]] = call float @returns_pinf_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[PINF]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %pinf = call float @returns_pinf_f32()
@@ -148,8 +145,7 @@ define nofpclass(ninf) half @ret_no_ninf__fptrunc__select_ninf_or_unknown(i1 %co
 ; CHECK-LABEL: define nofpclass(ninf) half @ret_no_ninf__fptrunc__select_ninf_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[NINF:%.*]] = call float @returns_ninf_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[NINF]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %ninf = call float @returns_ninf_f32()
@@ -163,8 +159,7 @@ define nofpclass(inf) half @ret_no_inf__fptrunc__select_inf_or_unknown(i1 %cond,
 ; CHECK-LABEL: define nofpclass(inf) half @ret_no_inf__fptrunc__select_inf_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[INF:%.*]] = call float @returns_inf_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[INF]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %inf = call float @returns_inf_f32()
@@ -178,8 +173,7 @@ define nofpclass(nan inf) half @ret_no_inf_no_nan__fptrunc__select_inf_or_nan_or
 ; CHECK-LABEL: define nofpclass(nan inf) half @ret_no_inf_no_nan__fptrunc__select_inf_or_nan_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[INF_OR_NAN:%.*]] = call float @returns_inf_or_nan_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[INF_OR_NAN]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %inf.or.nan = call float @returns_inf_or_nan_f32()
@@ -235,8 +229,7 @@ define nofpclass(pinf pnorm psub pzero) half @ret_no_positive__fptrunc__select_p
 ; CHECK-LABEL: define nofpclass(pinf pzero psub pnorm) half @ret_no_positive__fptrunc__select_positive_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[POSITIVE:%.*]] = call float @returns_positive_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[POSITIVE]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %positive = call float @returns_positive_f32()
@@ -265,8 +258,7 @@ define nofpclass(nan pinf pnorm psub pzero) half @ret_no_positive_no_nan__fptrun
 ; CHECK-LABEL: define nofpclass(nan pinf pzero psub pnorm) half @ret_no_positive_no_nan__fptrunc__select_positive_nan_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[POSITIVE_OR_NAN:%.*]] = call float @returns_positive_or_nan_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[POSITIVE_OR_NAN]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %positive.or.nan = call float @returns_positive_or_nan_f32()
@@ -280,8 +272,7 @@ define nofpclass(ninf nnorm nsub nzero) half @ret_no_negative__fptrunc__select_n
 ; CHECK-LABEL: define nofpclass(ninf nzero nsub nnorm) half @ret_no_negative__fptrunc__select_negative_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[NEGATIVE:%.*]] = call float @returns_negative_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[NEGATIVE]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %negative = call float @returns_negative_f32()
@@ -310,8 +301,7 @@ define nofpclass(nan ninf nnorm nsub nzero) half @ret_no_negative_no_nan__fptrun
 ; CHECK-LABEL: define nofpclass(nan ninf nzero nsub nnorm) half @ret_no_negative_no_nan__fptrunc__select_negative_nan_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[NEGATIVE_OR_NAN:%.*]] = call float @returns_negative_or_nan_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[NEGATIVE_OR_NAN]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %negative.or.nan = call float @returns_negative_or_nan_f32()
@@ -428,7 +418,7 @@ define nofpclass(inf nan norm pzero psub) half @ret_only_nsub_nzero__fptrunc(flo
 define nofpclass(ninf) half @ret_no_ninf__fptrunc__inf() {
 ; CHECK-LABEL: define nofpclass(ninf) half @ret_no_ninf__fptrunc__inf() {
 ; CHECK-NEXT:    [[INF:%.*]] = call float @returns_inf_f32()
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[INF]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float 0x7FF0000000000000, metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %inf = call float @returns_inf_f32()
@@ -440,7 +430,7 @@ define nofpclass(ninf) half @ret_no_ninf__fptrunc__inf() {
 define nofpclass(pinf) half @ret_no_pinf__fptrunc__inf() {
 ; CHECK-LABEL: define nofpclass(pinf) half @ret_no_pinf__fptrunc__inf() {
 ; CHECK-NEXT:    [[INF:%.*]] = call float @returns_inf_f32()
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[INF]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float 0xFFF0000000000000, metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %inf = call float @returns_inf_f32()
@@ -452,8 +442,7 @@ define nofpclass(nzero) half @ret_no_nzero__fptrunc__select_nzero_or_unknown(i1
 ; CHECK-LABEL: define nofpclass(nzero) half @ret_no_nzero__fptrunc__select_nzero_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[NZERO:%.*]] = call float @returns_nzero_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[NZERO]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %nzero = call float @returns_nzero_f32()
@@ -466,8 +455,7 @@ define nofpclass(pzero) half @ret_no_pzero__fptrunc__select_pzero_or_unknown(i1
 ; CHECK-LABEL: define nofpclass(pzero) half @ret_no_pzero__fptrunc__select_pzero_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[PZERO:%.*]] = call float @returns_pzero_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[PZERO]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %pzero = call float @returns_pzero_f32()
@@ -480,8 +468,7 @@ define nofpclass(zero) half @ret_no_zero__fptrunc__select_zero_or_unknown(i1 %co
 ; CHECK-LABEL: define nofpclass(zero) half @ret_no_zero__fptrunc__select_zero_or_unknown(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[UNKNOWN:%.*]]) {
 ; CHECK-NEXT:    [[ZERO:%.*]] = call float @returns_zero_f32()
-; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[COND]], float [[ZERO]], float [[UNKNOWN]]
-; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[SELECT]], metadata !"round.downward")
+; CHECK-NEXT:    [[RESULT:%.*]] = call half @llvm.fptrunc.round.f16.f32(float [[UNKNOWN]], metadata !"round.downward")
 ; CHECK-NEXT:    ret half [[RESULT]]
 ;
   %zero = call float @returns_zero_f32()
diff --git a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc.ll b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc.ll
index b777ad82b67d8..725761f337860 100644
--- a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass-fptrunc.ll
@@ -50,8 +50,7 @@ define nofpclass(inf norm sub zero snan) half @ret_only_qnan__fptrunc(float %x)
 define nofpclass(inf norm sub zero) ha...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/175421

