[llvm] [GISel] Add KnownFPClass Analysis to GISelValueTrackingPass (PR #134611)

Tim Gymnich via llvm-commits llvm-commits at lists.llvm.org
Fri May 23 00:27:25 PDT 2025


https://github.com/tgymnich updated https://github.com/llvm/llvm-project/pull/134611

>From 68b10e378d5d08f36924f13fed327bf24bb4bfd8 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 21 Mar 2025 16:52:17 +0000
Subject: [PATCH 1/3] [GlobalISel] Add computeFPClass to GlobalISelValueTracking

---
 .../CodeGen/GlobalISel/GISelValueTracking.h   |   43 +
 .../llvm/CodeGen/GlobalISel/MIPatternMatch.h  |   38 +
 llvm/include/llvm/CodeGen/GlobalISel/Utils.h  |   20 +
 llvm/include/llvm/CodeGen/TargetLowering.h    |    8 +
 .../CodeGen/GlobalISel/GISelValueTracking.cpp | 1033 +++++++++++++++++
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |    7 +
 6 files changed, 1149 insertions(+)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h b/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
index d4b4a4e731da7..6272fc8c6854e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
@@ -14,13 +14,16 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
 #define LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
 
+#include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/Register.h"
+#include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/KnownBits.h"
+#include "llvm/Support/KnownFPClass.h"
 
 namespace llvm {
 
@@ -42,6 +45,18 @@ class GISelValueTracking : public GISelChangeObserver {
   unsigned computeNumSignBitsMin(Register Src0, Register Src1,
                                  const APInt &DemandedElts, unsigned Depth = 0);
 
+  void computeKnownFPClass(Register R, KnownFPClass &Known,
+                           FPClassTest InterestedClasses, unsigned Depth);
+
+  void computeKnownFPClassForFPTrunc(const MachineInstr &MI,
+                                     const APInt &DemandedElts,
+                                     FPClassTest InterestedClasses,
+                                     KnownFPClass &Known, unsigned Depth);
+
+  void computeKnownFPClass(Register R, const APInt &DemandedElts,
+                           FPClassTest InterestedClasses, KnownFPClass &Known,
+                           unsigned Depth);
+
 public:
   GISelValueTracking(MachineFunction &MF, unsigned MaxDepth = 6);
   virtual ~GISelValueTracking() = default;
@@ -87,6 +102,34 @@ class GISelValueTracking : public GISelChangeObserver {
   /// \return The known alignment for the pointer-like value \p R.
   Align computeKnownAlignment(Register R, unsigned Depth = 0);
 
+  /// Determine which floating-point classes are valid for \p V, and return them
+  /// in KnownFPClass bit sets.
+  ///
+/// This function is defined on values with floating-point type,
+/// vectors of floating-point type, and arrays of floating-point type.
+///
+  /// \p InterestedClasses is a compile time optimization hint for which
+  /// floating point classes should be queried. Queries not specified in \p
+  /// InterestedClasses should be reliable if they are determined during the
+  /// query.
+  KnownFPClass computeKnownFPClass(Register R, const APInt &DemandedElts,
+                                   FPClassTest InterestedClasses,
+                                   unsigned Depth);
+
+  KnownFPClass computeKnownFPClass(Register R,
+                                   FPClassTest InterestedClasses = fcAllFlags,
+                                   unsigned Depth = 0);
+
+  /// Wrapper to account for known fast math flags at the use instruction.
+  KnownFPClass computeKnownFPClass(Register R, const APInt &DemandedElts,
+                                   uint32_t Flags,
+                                   FPClassTest InterestedClasses,
+                                   unsigned Depth);
+
+  KnownFPClass computeKnownFPClass(Register R, uint32_t Flags,
+                                   FPClassTest InterestedClasses,
+                                   unsigned Depth);
+
   // Observer API. No-op for non-caching implementation.
   void erasingInstr(MachineInstr &MI) override {}
   void createdInstr(MachineInstr &MI) override {}
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 72483fbea5805..c0d3a12cbcb41 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -14,8 +14,11 @@
 #define LLVM_CODEGEN_GLOBALISEL_MIPATTERNMATCH_H
 
 #include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/InstrTypes.h"
 
 namespace llvm {
@@ -393,6 +396,7 @@ inline bind_ty<const MachineInstr *> m_MInstr(const MachineInstr *&MI) {
 inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
 inline bind_ty<CmpInst::Predicate> m_Pred(CmpInst::Predicate &P) { return P; }
 inline operand_type_match m_Pred() { return operand_type_match(); }
+inline bind_ty<FPClassTest> m_FPClassTest(FPClassTest &T) { return T; }
 
 template <typename BindTy> struct deferred_helper {
   static bool match(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
@@ -762,6 +766,32 @@ struct CompareOp_match {
   }
 };
 
+template <typename LHS_P, typename Test_P, unsigned Opcode>
+struct ClassifyOp_match {
+  LHS_P L;
+  Test_P T;
+
+  ClassifyOp_match(const LHS_P &LHS, const Test_P &Tst) : L(LHS), T(Tst) {}
+
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
+      return false;
+
+    Register LHS = TmpMI->getOperand(1).getReg();
+    if (!L.match(MRI, LHS))
+      return false;
+
+    FPClassTest TmpClass =
+        static_cast<FPClassTest>(TmpMI->getOperand(2).getImm());
+    if (T.match(MRI, TmpClass))
+      return true;
+
+    return false;
+  }
+};
+
 template <typename Pred, typename LHS, typename RHS>
 inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>
 m_GICmp(const Pred &P, const LHS &L, const RHS &R) {
@@ -804,6 +834,14 @@ m_c_GFCmp(const Pred &P, const LHS &L, const RHS &R) {
   return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP, true>(P, L, R);
 }
 
+/// Matches the register and immediate used in a fpclass test
+/// G_IS_FPCLASS %val, 96
+template <typename LHS, typename Test>
+inline ClassifyOp_match<LHS, Test, TargetOpcode::G_IS_FPCLASS>
+m_GIsFPClass(const LHS &L, const Test &T) {
+  return ClassifyOp_match<LHS, Test, TargetOpcode::G_IS_FPCLASS>(L, T);
+}
+
 // Helper for checking if a Reg is of specific type.
 struct CheckType {
   LLT Ty;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 35d21aa1d66d9..684a9bf554fb1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -655,6 +655,9 @@ class GIConstant {
 /// }
 /// provides low-level access.
 class GFConstant {
+  using VecTy = SmallVector<APFloat>;
+  using const_iterator = VecTy::const_iterator;
+
 public:
   enum class GFConstantKind { Scalar, FixedVector, ScalableVector };
 
@@ -672,6 +675,23 @@ class GFConstant {
   /// Returns the kind of of this constant, e.g, Scalar.
   GFConstantKind getKind() const { return Kind; }
 
+  const_iterator begin() const {
+    assert(Kind != GFConstantKind::ScalableVector &&
+           "Expected fixed vector or scalar constant");
+    return Values.begin();
+  }
+
+  const_iterator end() const {
+    assert(Kind != GFConstantKind::ScalableVector &&
+           "Expected fixed vector or scalar constant");
+    return Values.end();
+  }
+
+  size_t size() const {
+    assert(Kind == GFConstantKind::FixedVector && "Expected fixed vector");
+    return Values.size();
+  }
+
   /// Returns the value, if this constant is a scalar.
   APFloat getScalarValue() const;
 
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index f208a10bcc203..d9fce57c6e43a 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -51,6 +51,7 @@
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownFPClass.h"
 #include <algorithm>
 #include <cassert>
 #include <climits>
@@ -4233,6 +4234,13 @@ class TargetLowering : public TargetLoweringBase {
                                               const MachineRegisterInfo &MRI,
                                               unsigned Depth = 0) const;
 
+  virtual void computeKnownFPClassForTargetInstr(GISelValueTracking &Analysis,
+                                                 Register R,
+                                                 KnownFPClass &Known,
+                                                 const APInt &DemandedElts,
+                                                 const MachineRegisterInfo &MRI,
+                                                 unsigned Depth = 0) const;
+
   /// Determine the known alignment for the pointer value \p R. This is can
   /// typically be inferred from the number of low known 0 bits. However, for a
   /// pointer with a non-integral address space, the alignment value may be
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
index d16eef1178cbc..6d86c6f0837a6 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
@@ -12,21 +12,36 @@
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Analysis/VectorUtils.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/GlobalISel/MachineFloatingPointPredicateUtils.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/LowLevelTypeUtils.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/FMF.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/KnownFPClass.h"
 #include "llvm/Target/TargetMachine.h"
 
 #define DEBUG_TYPE "gisel-known-bits"
 
 using namespace llvm;
+using namespace MIPatternMatch;
 
 char llvm::GISelValueTrackingAnalysisLegacy::ID = 0;
 
@@ -668,6 +683,1024 @@ void GISelValueTracking::computeKnownBitsImpl(Register R, KnownBits &Known,
   ComputeKnownBitsCache[R] = Known;
 }
 
+static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty) {
+  Ty = Ty.getScalarType();
+  DenormalMode Mode = MF.getDenormalMode(getFltSemanticForLLT(Ty));
+  return Mode.Output == DenormalMode::IEEE ||
+         Mode.Output == DenormalMode::PositiveZero;
+}
+
+void GISelValueTracking::computeKnownFPClass(Register R, KnownFPClass &Known,
+                                             FPClassTest InterestedClasses,
+                                             unsigned Depth) {
+  LLT Ty = MRI.getType(R);
+  APInt DemandedElts =
+      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
+  computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth);
+}
+
+void GISelValueTracking::computeKnownFPClassForFPTrunc(
+    const MachineInstr &MI, const APInt &DemandedElts,
+    FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth) {
+  if ((InterestedClasses & (KnownFPClass::OrderedLessThanZeroMask | fcNan)) ==
+      fcNone)
+    return;
+
+  Register Val = MI.getOperand(1).getReg();
+  KnownFPClass KnownSrc;
+  computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                      Depth + 1);
+
+  // Sign should be preserved
+  // TODO: Handle cannot be ordered greater than zero
+  if (KnownSrc.cannotBeOrderedLessThanZero())
+    Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+  Known.propagateNaN(KnownSrc, true);
+
+  // Infinity needs a range check.
+}
+
+void GISelValueTracking::computeKnownFPClass(Register R,
+                                             const APInt &DemandedElts,
+                                             FPClassTest InterestedClasses,
+                                             KnownFPClass &Known,
+                                             unsigned Depth) {
+  assert(Known.isUnknown() && "should not be called with known information");
+
+  if (!DemandedElts) {
+    // No demanded elts, better to assume we don't know anything.
+    Known.resetAll();
+    return;
+  }
+
+  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
+
+  MachineInstr &MI = *MRI.getVRegDef(R);
+  unsigned Opcode = MI.getOpcode();
+  LLT DstTy = MRI.getType(R);
+
+  if (!DstTy.isValid()) {
+    Known.resetAll();
+    return;
+  }
+
+  if (auto Cst = GFConstant::getConstant(R, MRI)) {
+    switch (Cst->getKind()) {
+    case GFConstant::GFConstantKind::Scalar: {
+      auto APF = Cst->getScalarValue();
+      Known.KnownFPClasses = APF.classify();
+      Known.SignBit = APF.isNegative();
+      break;
+    }
+    case GFConstant::GFConstantKind::FixedVector: {
+      Known.KnownFPClasses = fcNone;
+      bool SignBitAllZero = true;
+      bool SignBitAllOne = true;
+
+      for (auto C : *Cst) {
+        Known.KnownFPClasses |= C.classify();
+        if (C.isNegative())
+          SignBitAllZero = false;
+        else
+          SignBitAllOne = false;
+      }
+
+      if (SignBitAllOne != SignBitAllZero)
+        Known.SignBit = SignBitAllOne;
+
+      break;
+    }
+    case GFConstant::GFConstantKind::ScalableVector: {
+      Known.resetAll();
+      break;
+    }
+    }
+
+    return;
+  }
+
+  FPClassTest KnownNotFromFlags = fcNone;
+  if (MI.getFlag(MachineInstr::MIFlag::FmNoNans))
+    KnownNotFromFlags |= fcNan;
+  if (MI.getFlag(MachineInstr::MIFlag::FmNoInfs))
+    KnownNotFromFlags |= fcInf;
+
+  // We no longer need to find out about these bits from inputs if we can
+  // assume this from flags/attributes.
+  InterestedClasses &= ~KnownNotFromFlags;
+
+  auto ClearClassesFromFlags =
+      make_scope_exit([=, &Known] { Known.knownNot(KnownNotFromFlags); });
+
+  // All recursive calls that increase depth must come after this.
+  if (Depth == MaxAnalysisRecursionDepth)
+    return;
+
+  const MachineFunction *MF = MI.getMF();
+
+  switch (Opcode) {
+  default:
+    TL.computeKnownFPClassForTargetInstr(*this, R, Known, DemandedElts, MRI,
+                                         Depth);
+    break;
+  case TargetOpcode::G_FNEG: {
+    Register Val = MI.getOperand(1).getReg();
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known, Depth + 1);
+    Known.fneg();
+    break;
+  }
+  case TargetOpcode::G_SELECT: {
+    GSelect &SelMI = cast<GSelect>(MI);
+    Register Cond = SelMI.getCondReg();
+    Register LHS = SelMI.getTrueReg();
+    Register RHS = SelMI.getFalseReg();
+
+    FPClassTest FilterLHS = fcAllFlags;
+    FPClassTest FilterRHS = fcAllFlags;
+
+    Register TestedValue;
+    FPClassTest MaskIfTrue = fcAllFlags;
+    FPClassTest MaskIfFalse = fcAllFlags;
+    FPClassTest ClassVal = fcNone;
+
+    CmpInst::Predicate Pred;
+    Register CmpLHS, CmpRHS;
+    if (mi_match(Cond, MRI,
+                 m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) {
+      // If the select filters out a value based on the class, it no longer
+      // participates in the class of the result
+
+      // TODO: In some degenerate cases we can infer something if we try again
+      // without looking through sign operations.
+      bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
+      std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
+          fcmpImpliesClass(Pred, *MF, CmpLHS, CmpRHS, LookThroughFAbsFNeg);
+    } else if (mi_match(
+                   Cond, MRI,
+                   m_GIsFPClass(m_Reg(TestedValue), m_FPClassTest(ClassVal)))) {
+      FPClassTest TestedMask = ClassVal;
+      MaskIfTrue = TestedMask;
+      MaskIfFalse = ~TestedMask;
+    }
+
+    if (TestedValue == LHS) {
+      // match !isnan(x) ? x : y
+      FilterLHS = MaskIfTrue;
+    } else if (TestedValue == RHS) { // && IsExactClass
+      // match !isnan(x) ? y : x
+      FilterRHS = MaskIfFalse;
+    }
+
+    KnownFPClass Known2;
+    computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
+                        Depth + 1);
+    Known.KnownFPClasses &= FilterLHS;
+
+    computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
+                        Known2, Depth + 1);
+    Known2.KnownFPClasses &= FilterRHS;
+
+    Known |= Known2;
+    break;
+  }
+  case TargetOpcode::G_FCOPYSIGN: {
+    Register Magnitude = MI.getOperand(1).getReg();
+    Register Sign = MI.getOperand(2).getReg();
+
+    KnownFPClass KnownSign;
+
+    computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
+                        Depth + 1);
+    computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
+                        Depth + 1);
+    Known.copysign(KnownSign);
+    break;
+  }
+  case TargetOpcode::G_FMA:
+  case TargetOpcode::G_STRICT_FMA:
+  case TargetOpcode::G_FMAD: {
+    if ((InterestedClasses & fcNegative) == fcNone)
+      break;
+
+    Register A = MI.getOperand(1).getReg();
+    Register B = MI.getOperand(2).getReg();
+    Register C = MI.getOperand(3).getReg();
+
+    if (A != B)
+      break;
+
+    // The multiply cannot be -0 and therefore the add can't be -0
+    Known.knownNot(fcNegZero);
+
+    // x * x + y is non-negative if y is non-negative.
+    KnownFPClass KnownAddend;
+    computeKnownFPClass(C, DemandedElts, InterestedClasses, KnownAddend,
+                        Depth + 1);
+
+    if (KnownAddend.cannotBeOrderedLessThanZero())
+      Known.knownNot(fcNegative);
+    break;
+  }
+  case TargetOpcode::G_FSQRT:
+  case TargetOpcode::G_STRICT_FSQRT: {
+    KnownFPClass KnownSrc;
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if (InterestedClasses & fcNan)
+      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
+
+    Register Val = MI.getOperand(1).getReg();
+
+    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
+
+    if (KnownSrc.isKnownNeverPosInfinity())
+      Known.knownNot(fcPosInf);
+    if (KnownSrc.isKnownNever(fcSNan))
+      Known.knownNot(fcSNan);
+
+    // Any negative value besides -0 returns a nan.
+    if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
+      Known.knownNot(fcNan);
+
+    // The only negative value that can be returned is -0 for -0 inputs.
+    Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal);
+    break;
+  }
+  case TargetOpcode::G_FABS: {
+    if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
+      Register Val = MI.getOperand(1).getReg();
+      // If we only care about the sign bit we don't need to inspect the
+      // operand.
+      computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
+                          Depth + 1);
+    }
+    Known.fabs();
+    break;
+  }
+  case TargetOpcode::G_FSIN:
+  case TargetOpcode::G_FCOS:
+  case TargetOpcode::G_FSINCOS: {
+    // Return NaN on infinite inputs.
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+    Known.knownNot(fcInf);
+
+    if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
+      Known.knownNot(fcNan);
+    break;
+  }
+  case TargetOpcode::G_FMAXNUM:
+  case TargetOpcode::G_FMINNUM:
+  case TargetOpcode::G_FMINNUM_IEEE:
+  case TargetOpcode::G_FMAXIMUM:
+  case TargetOpcode::G_FMINIMUM:
+  case TargetOpcode::G_FMAXNUM_IEEE: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    KnownFPClass KnownLHS, KnownRHS;
+
+    computeKnownFPClass(LHS, DemandedElts, InterestedClasses, KnownLHS,
+                        Depth + 1);
+    computeKnownFPClass(RHS, DemandedElts, InterestedClasses, KnownRHS,
+                        Depth + 1);
+
+    bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
+    Known = KnownLHS | KnownRHS;
+
+    // If either operand is not NaN, the result is not NaN.
+    if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
+                     Opcode == TargetOpcode::G_FMAXNUM))
+      Known.knownNot(fcNan);
+
+    if (Opcode == TargetOpcode::G_FMAXNUM) {
+      // If at least one operand is known to be positive, the result must be
+      // positive.
+      if ((KnownLHS.cannotBeOrderedLessThanZero() &&
+           KnownLHS.isKnownNeverNaN()) ||
+          (KnownRHS.cannotBeOrderedLessThanZero() &&
+           KnownRHS.isKnownNeverNaN()))
+        Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+    } else if (Opcode == TargetOpcode::G_FMAXIMUM) {
+      // If at least one operand is known to be positive, the result must be
+      // positive.
+      if (KnownLHS.cannotBeOrderedLessThanZero() ||
+          KnownRHS.cannotBeOrderedLessThanZero())
+        Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+    } else if (Opcode == TargetOpcode::G_FMINNUM) {
+      // If at least one operand is known to be negative, the result must be
+      // negative.
+      if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
+           KnownLHS.isKnownNeverNaN()) ||
+          (KnownRHS.cannotBeOrderedGreaterThanZero() &&
+           KnownRHS.isKnownNeverNaN()))
+        Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+    } else if (Opcode == TargetOpcode::G_FMINNUM_IEEE) {
+      // TODO:
+    } else if (Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
+      // TODO:
+    } else {
+      // If at least one operand is known to be negative, the result must be
+      // negative.
+      if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
+          KnownRHS.cannotBeOrderedGreaterThanZero())
+        Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+    }
+
+    // Fixup zero handling if denormals could be returned as a zero.
+    //
+    // As there's no spec for denormal flushing, be conservative with the
+    // treatment of denormals that could be flushed to zero. For older
+    // subtargets on AMDGPU the min/max instructions would not flush the
+    // output and return the original value.
+    //
+    if ((Known.KnownFPClasses & fcZero) != fcNone &&
+        !Known.isKnownNeverSubnormal()) {
+      DenormalMode Mode = MF->getDenormalMode(getFltSemanticForLLT(DstTy));
+      if (Mode != DenormalMode::getIEEE())
+        Known.KnownFPClasses |= fcZero;
+    }
+
+    if (Known.isKnownNeverNaN()) {
+      if (KnownLHS.SignBit && KnownRHS.SignBit &&
+          *KnownLHS.SignBit == *KnownRHS.SignBit) {
+        if (*KnownLHS.SignBit)
+          Known.signBitMustBeOne();
+        else
+          Known.signBitMustBeZero();
+      } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
+                  Opcode == TargetOpcode::G_FMINIMUM) ||
+                 ((KnownLHS.isKnownNeverNegZero() ||
+                   KnownRHS.isKnownNeverPosZero()) &&
+                  (KnownLHS.isKnownNeverPosZero() ||
+                   KnownRHS.isKnownNeverNegZero()))) {
+        if ((Opcode == TargetOpcode::G_FMAXIMUM ||
+             Opcode == TargetOpcode::G_FMAXNUM) &&
+            (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
+          Known.signBitMustBeZero();
+        else if ((Opcode == TargetOpcode::G_FMINIMUM ||
+                  Opcode == TargetOpcode::G_FMINNUM) &&
+                 (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
+          Known.signBitMustBeOne();
+      }
+    }
+    break;
+  }
+  case TargetOpcode::G_FCANONICALIZE: {
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+
+    // This is essentially a stronger form of
+    // propagateCanonicalizingSrc. Other "canonicalizing" operations don't
+    // actually have an IR canonicalization guarantee.
+
+    // Canonicalize may flush denormals to zero, so we have to consider the
+    // denormal mode to preserve known-not-0 knowledge.
+    Known.KnownFPClasses = KnownSrc.KnownFPClasses | fcZero | fcQNan;
+
+    // Stronger version of propagateNaN
+    // Canonicalize is guaranteed to quiet signaling nans.
+    if (KnownSrc.isKnownNeverNaN())
+      Known.knownNot(fcNan);
+    else
+      Known.knownNot(fcSNan);
+
+    // If the parent function flushes denormals, the canonical output cannot
+    // be a denormal.
+    LLT Ty = MRI.getType(Val);
+    const fltSemantics &FPType = getFltSemanticForLLT(Ty.getScalarType());
+    DenormalMode DenormMode = MF->getDenormalMode(FPType);
+    if (DenormMode == DenormalMode::getIEEE()) {
+      if (KnownSrc.isKnownNever(fcPosZero))
+        Known.knownNot(fcPosZero);
+      if (KnownSrc.isKnownNever(fcNegZero))
+        Known.knownNot(fcNegZero);
+      break;
+    }
+
+    if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero())
+      Known.knownNot(fcSubnormal);
+
+    if (DenormMode.Input == DenormalMode::PositiveZero ||
+        (DenormMode.Output == DenormalMode::PositiveZero &&
+         DenormMode.Input == DenormalMode::IEEE))
+      Known.knownNot(fcNegZero);
+
+    break;
+  }
+  case TargetOpcode::G_VECREDUCE_FMAX:
+  case TargetOpcode::G_VECREDUCE_FMIN:
+  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
+  case TargetOpcode::G_VECREDUCE_FMINIMUM: {
+    Register Val = MI.getOperand(1).getReg();
+    // reduce min/max will choose an element from one of the vector elements,
+    // so we can infer and class information that is common to all elements.
+
+    Known =
+        computeKnownFPClass(Val, MI.getFlags(), InterestedClasses, Depth + 1);
+    // Can only propagate sign if output is never NaN.
+    if (!Known.isKnownNeverNaN())
+      Known.SignBit.reset();
+    break;
+  }
+  case TargetOpcode::G_INTRINSIC_TRUNC:
+  case TargetOpcode::G_FFLOOR:
+  case TargetOpcode::G_FCEIL:
+  case TargetOpcode::G_FRINT:
+  case TargetOpcode::G_FNEARBYINT:
+  case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
+  case TargetOpcode::G_INTRINSIC_ROUND: {
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if (InterestedSrcs & fcPosFinite)
+      InterestedSrcs |= fcPosFinite;
+    if (InterestedSrcs & fcNegFinite)
+      InterestedSrcs |= fcNegFinite;
+    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
+
+    // Integer results cannot be subnormal.
+    Known.knownNot(fcSubnormal);
+
+    Known.propagateNaN(KnownSrc, true);
+
+    // TODO: handle multi unit FPTypes once LLT FPInfo lands
+
+    // Negative round ups to 0 produce -0
+    if (KnownSrc.isKnownNever(fcPosFinite))
+      Known.knownNot(fcPosFinite);
+    if (KnownSrc.isKnownNever(fcNegFinite))
+      Known.knownNot(fcNegFinite);
+
+    break;
+  }
+  case TargetOpcode::G_FEXP:
+  case TargetOpcode::G_FEXP2:
+  case TargetOpcode::G_FEXP10: {
+    Known.knownNot(fcNegative);
+    if ((InterestedClasses & fcNan) == fcNone)
+      break;
+
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+    if (KnownSrc.isKnownNeverNaN()) {
+      Known.knownNot(fcNan);
+      Known.signBitMustBeZero();
+    }
+
+    break;
+  }
+  case TargetOpcode::G_FLOG:
+  case TargetOpcode::G_FLOG2:
+  case TargetOpcode::G_FLOG10: {
+    // log(+inf) -> +inf
+    // log([+-]0.0) -> -inf
+    // log(-inf) -> nan
+    // log(-x) -> nan
+    if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
+      break;
+
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if ((InterestedClasses & fcNegInf) != fcNone)
+      InterestedSrcs |= fcZero | fcSubnormal;
+    if ((InterestedClasses & fcNan) != fcNone)
+      InterestedSrcs |= fcNan | (fcNegative & ~fcNan);
+
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
+
+    if (KnownSrc.isKnownNeverPosInfinity())
+      Known.knownNot(fcPosInf);
+
+    if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
+      Known.knownNot(fcNan);
+
+    LLT Ty = MRI.getType(Val);
+    const fltSemantics &FltSem = getFltSemanticForLLT(Ty.getScalarType());
+    DenormalMode Mode = MF->getDenormalMode(FltSem);
+
+    if (KnownSrc.isKnownNeverLogicalZero(Mode))
+      Known.knownNot(fcNegInf);
+
+    break;
+  }
+  case TargetOpcode::G_FPOWI: {
+    if ((InterestedClasses & fcNegative) == fcNone)
+      break;
+
+    Register Exp = MI.getOperand(2).getReg();
+    LLT ExpTy = MRI.getType(Exp);
+    KnownBits ExponentKnownBits = getKnownBits(
+        Exp, ExpTy.isVector() ? DemandedElts : APInt(1, 1), Depth + 1);
+
+    if (ExponentKnownBits.Zero[0]) { // Is even
+      Known.knownNot(fcNegative);
+      break;
+    }
+
+    // Given that exp is an integer, here are the
+    // ways that pow can return a negative value:
+    //
+    //   pow(-x, exp)   --> negative if exp is odd and x is negative.
+    //   pow(-0, exp)   --> -inf if exp is negative odd.
+    //   pow(-0, exp)   --> -0 if exp is positive odd.
+    //   pow(-inf, exp) --> -0 if exp is negative odd.
+    //   pow(-inf, exp) --> -inf if exp is positive odd.
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, fcNegative, KnownSrc, Depth + 1);
+    if (KnownSrc.isKnownNever(fcNegative))
+      Known.knownNot(fcNegative);
+    break;
+  }
+  case TargetOpcode::G_FLDEXP:
+  case TargetOpcode::G_STRICT_FLDEXP: {
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+    Known.propagateNaN(KnownSrc, /*PropagateSign=*/true);
+
+    // Sign is preserved, but underflows may produce zeroes.
+    if (KnownSrc.isKnownNever(fcNegative))
+      Known.knownNot(fcNegative);
+    else if (KnownSrc.cannotBeOrderedLessThanZero())
+      Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+    if (KnownSrc.isKnownNever(fcPositive))
+      Known.knownNot(fcPositive);
+    else if (KnownSrc.cannotBeOrderedGreaterThanZero())
+      Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+
+    // Can refine inf/zero handling based on the exponent operand.
+    const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
+    if ((InterestedClasses & ExpInfoMask) == fcNone)
+      break;
+    if ((KnownSrc.KnownFPClasses & ExpInfoMask) == fcNone)
+      break;
+
+    // TODO: Handle constant range of Exp
+
+    break;
+  }
+  case TargetOpcode::G_FPTRUNC: {
+    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
+                                  Depth);
+    break;
+  }
+  case TargetOpcode::G_FADD:
+  case TargetOpcode::G_STRICT_FADD:
+  case TargetOpcode::G_FSUB:
+  case TargetOpcode::G_STRICT_FSUB: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    KnownFPClass KnownLHS, KnownRHS;
+    bool WantNegative =
+        (Opcode == TargetOpcode::G_FADD ||
+         Opcode == TargetOpcode::G_STRICT_FADD) &&
+        (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone;
+    bool WantNaN = (InterestedClasses & fcNan) != fcNone;
+    bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;
+
+    if (!WantNaN && !WantNegative && !WantNegZero)
+      break;
+
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if (WantNegative)
+      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
+    if (InterestedClasses & fcNan)
+      InterestedSrcs |= fcInf;
+    computeKnownFPClass(RHS, DemandedElts, InterestedSrcs, KnownRHS, Depth + 1);
+
+    if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
+        (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) ||
+        WantNegZero ||
+        (Opcode == TargetOpcode::G_FSUB ||
+         Opcode == TargetOpcode::G_STRICT_FSUB)) {
+
+      // RHS is canonically cheaper to compute. Skip inspecting the LHS if
+      // there's no point.
+      computeKnownFPClass(LHS, DemandedElts, InterestedSrcs, KnownLHS,
+                          Depth + 1);
+      // Adding positive and negative infinity produces NaN.
+      // TODO: Check sign of infinities.
+      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
+          (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
+        Known.knownNot(fcNan);
+
+      if (Opcode == TargetOpcode::G_FADD || Opcode == TargetOpcode::G_STRICT_FADD) {
+        if (KnownLHS.cannotBeOrderedLessThanZero() &&
+            KnownRHS.cannotBeOrderedLessThanZero())
+          Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+        // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
+        if ((KnownLHS.isKnownNeverLogicalNegZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy))) ||
+             KnownRHS.isKnownNeverLogicalNegZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) &&
+            // Make sure output negative denormal can't flush to -0
+            outputDenormalIsIEEEOrPosZero(*MF, DstTy))
+          Known.knownNot(fcNegZero);
+      } else {
+        // Only fsub -0, +0 can return -0
+        if ((KnownLHS.isKnownNeverLogicalNegZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy))) ||
+             KnownRHS.isKnownNeverLogicalPosZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) &&
+            // Make sure output negative denormal can't flush to -0
+            outputDenormalIsIEEEOrPosZero(*MF, DstTy))
+          Known.knownNot(fcNegZero);
+      }
+    }
+
+    break;
+  }
+  case TargetOpcode::G_FMUL:
+  case TargetOpcode::G_STRICT_FMUL: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    // X * X is always non-negative or a NaN.
+    if (LHS == RHS)
+      Known.knownNot(fcNegative);
+
+    if ((InterestedClasses & fcNan) != fcNan)
+      break;
+
+    // fcSubnormal is only needed in case of DAZ.
+    const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;
+
+    KnownFPClass KnownLHS, KnownRHS;
+    computeKnownFPClass(RHS, DemandedElts, NeedForNan, KnownRHS, Depth + 1);
+    if (!KnownRHS.isKnownNeverNaN())
+      break;
+
+    computeKnownFPClass(LHS, DemandedElts, NeedForNan, KnownLHS, Depth + 1);
+    if (!KnownLHS.isKnownNeverNaN())
+      break;
+
+    if (KnownLHS.SignBit && KnownRHS.SignBit) { // Product sign = XOR of signs.
+      if (*KnownLHS.SignBit == *KnownRHS.SignBit)
+        Known.signBitMustBeZero();
+      else
+        Known.signBitMustBeOne();
+    }
+
+    // Only 0 * +/-inf produces NaN; neither operand can be infinity here.
+    if (KnownLHS.isKnownNeverInfinity() && KnownRHS.isKnownNeverInfinity()) {
+      Known.knownNot(fcNan);
+      break;
+    }
+
+    if ((KnownRHS.isKnownNeverInfinity() ||
+         KnownLHS.isKnownNeverLogicalZero(
+             MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) &&
+        (KnownLHS.isKnownNeverInfinity() ||
+         KnownRHS.isKnownNeverLogicalZero(
+             MF->getDenormalMode(getFltSemanticForLLT(DstTy)))))
+      Known.knownNot(fcNan);
+
+    break;
+  }
+  case TargetOpcode::G_FDIV:
+  case TargetOpcode::G_FREM: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+
+    if (LHS == RHS) {
+      // TODO: Could filter out snan if we inspect the operand
+      if (Opcode == TargetOpcode::G_FDIV) {
+        // X / X is always exactly 1.0 or a NaN.
+        Known.KnownFPClasses = fcNan | fcPosNormal;
+      } else {
+        // X % X is always exactly [+-]0.0 or a NaN.
+        Known.KnownFPClasses = fcNan | fcZero;
+      }
+
+      break;
+    }
+
+    const bool WantNan = (InterestedClasses & fcNan) != fcNone;
+    const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
+    const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
+                              (InterestedClasses & fcPositive) != fcNone;
+    if (!WantNan && !WantNegative && !WantPositive)
+      break;
+
+    KnownFPClass KnownLHS, KnownRHS;
+
+    computeKnownFPClass(RHS, DemandedElts, fcNan | fcInf | fcZero | fcNegative,
+                        KnownRHS, Depth + 1);
+
+    bool KnowSomethingUseful =
+        KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
+
+    if (KnowSomethingUseful || WantPositive) {
+      const FPClassTest InterestedLHS =
+          WantPositive ? fcAllFlags
+                       : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;
+
+      computeKnownFPClass(LHS, DemandedElts, InterestedClasses & InterestedLHS,
+                          KnownLHS, Depth + 1);
+    }
+
+    if (Opcode == TargetOpcode::G_FDIV) {
+      // Only 0/0, Inf/Inf produce NaN.
+      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
+          (KnownLHS.isKnownNeverInfinity() ||
+           KnownRHS.isKnownNeverInfinity()) &&
+          ((KnownLHS.isKnownNeverLogicalZero(
+               MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) ||
+           (KnownRHS.isKnownNeverLogicalZero(
+               MF->getDenormalMode(getFltSemanticForLLT(DstTy)))))) {
+        Known.knownNot(fcNan);
+      }
+
+      // X / -0.0 is -Inf (or NaN).
+      // +X / +X is +X
+      if (KnownLHS.isKnownNever(fcNegative) &&
+          KnownRHS.isKnownNever(fcNegative))
+        Known.knownNot(fcNegative);
+    } else {
+      // Inf REM x and x REM 0 produce NaN.
+      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
+          KnownLHS.isKnownNeverInfinity() &&
+          KnownRHS.isKnownNeverLogicalZero(
+              MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) {
+        Known.knownNot(fcNan);
+      }
+
+      // The sign for frem is the same as the first operand.
+      if (KnownLHS.cannotBeOrderedLessThanZero())
+        Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+      if (KnownLHS.cannotBeOrderedGreaterThanZero())
+        Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+
+      // See if we can be more aggressive about the sign of 0.
+      if (KnownLHS.isKnownNever(fcNegative))
+        Known.knownNot(fcNegative);
+      if (KnownLHS.isKnownNever(fcPositive))
+        Known.knownNot(fcPositive);
+    }
+
+    break;
+  }
+  case TargetOpcode::G_FPEXT: {
+    Register Dst = MI.getOperand(0).getReg();
+    Register Src = MI.getOperand(1).getReg();
+    // Infinity, nan and zero propagate from source.
+    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1);
+
+    LLT DstTy = MRI.getType(Dst);
+    const fltSemantics &DstSem = getFltSemanticForLLT(DstTy.getScalarType());
+    LLT SrcTy = MRI.getType(Src);
+    const fltSemantics &SrcSem = getFltSemanticForLLT(SrcTy.getScalarType());
+
+    // All subnormal inputs should be in the normal range in the result type.
+    if (APFloat::isRepresentableAsNormalIn(SrcSem, DstSem)) {
+      if (Known.KnownFPClasses & fcPosSubnormal)
+        Known.KnownFPClasses |= fcPosNormal;
+      if (Known.KnownFPClasses & fcNegSubnormal)
+        Known.KnownFPClasses |= fcNegNormal;
+      Known.knownNot(fcSubnormal);
+    }
+
+    // Sign bit of a nan isn't guaranteed.
+    if (!Known.isKnownNeverNaN())
+      Known.SignBit = std::nullopt;
+    break;
+  }
+  case TargetOpcode::G_FPTRUNC: {
+    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
+                                  Depth);
+    break;
+  }
+  case TargetOpcode::G_SITOFP:
+  case TargetOpcode::G_UITOFP: {
+    // Cannot produce nan
+    Known.knownNot(fcNan);
+
+    // Integers cannot be subnormal
+    Known.knownNot(fcSubnormal);
+
+    // sitofp and uitofp turn into +0.0 for zero.
+    Known.knownNot(fcNegZero);
+    if (Opcode == TargetOpcode::G_UITOFP)
+      Known.signBitMustBeZero();
+
+    Register Val = MI.getOperand(1).getReg();
+    LLT Ty = MRI.getType(Val);
+
+    if (InterestedClasses & fcInf) {
+      // Get width of largest magnitude integer (remove a bit if signed).
+      // This still works for a signed minimum value because the largest FP
+      // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).;
+      int IntSize = Ty.getScalarSizeInBits();
+      if (Opcode == TargetOpcode::G_SITOFP)
+        --IntSize;
+
+      // If the exponent of the largest finite FP value can hold the largest
+      // integer, the result of the cast must be finite.
+      LLT FPTy = DstTy.getScalarType();
+      const fltSemantics &FltSem = getFltSemanticForLLT(FPTy);
+      if (ilogb(APFloat::getLargest(FltSem)) >= IntSize)
+        Known.knownNot(fcInf);
+    }
+
+    break;
+  }
+  // case TargetOpcode::G_MERGE_VALUES:
+  case TargetOpcode::G_BUILD_VECTOR:
+  case TargetOpcode::G_CONCAT_VECTORS: {
+    GMergeLikeInstr &Merge = cast<GMergeLikeInstr>(MI);
+
+    if (!DstTy.isFixedVector())
+      break;
+
+    bool First = true;
+    for (unsigned Idx = 0; Idx < Merge.getNumSources(); ++Idx) {
+      // We know the index we are inserting to, so clear it from Vec check.
+      bool NeedsElt = DemandedElts[Idx];
+
+      // Do we demand the inserted element?
+      if (NeedsElt) {
+        Register Src = Merge.getSourceReg(Idx);
+        if (First) {
+          computeKnownFPClass(Src, Known, InterestedClasses, Depth + 1);
+          First = false;
+        } else {
+          KnownFPClass Known2;
+          computeKnownFPClass(Src, Known2, InterestedClasses, Depth + 1);
+          Known |= Known2;
+        }
+
+        // If we don't know any bits, early out.
+        if (Known.isUnknown())
+          break;
+      }
+    }
+
+    break;
+  }
+  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
+    // Look through extract element. If the index is non-constant or
+    // out-of-range demand all elements, otherwise just the extracted
+    // element.
+    GExtractVectorElement &Extract = cast<GExtractVectorElement>(MI);
+    Register Vec = Extract.getVectorReg();
+    Register Idx = Extract.getIndexReg();
+
+    auto CIdx = getIConstantVRegVal(Idx, MRI);
+
+    LLT VecTy = MRI.getType(Vec);
+
+    if (VecTy.isFixedVector()) {
+      unsigned NumElts = VecTy.getNumElements();
+      APInt DemandedVecElts = APInt::getAllOnes(NumElts);
+      if (CIdx && CIdx->ult(NumElts))
+        DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
+      return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
+                                 Depth + 1);
+    }
+
+    break;
+  }
+  case TargetOpcode::G_INSERT_VECTOR_ELT: {
+    GInsertVectorElement &Insert = cast<GInsertVectorElement>(MI);
+    Register Vec = Insert.getVectorReg();
+    Register Elt = Insert.getElementReg();
+    Register Idx = Insert.getIndexReg();
+
+    LLT VecTy = MRI.getType(Vec);
+
+    if (VecTy.isScalableVector())
+      return;
+
+    auto CIdx = getIConstantVRegVal(Idx, MRI);
+
+    unsigned NumElts = DemandedElts.getBitWidth();
+    APInt DemandedVecElts = DemandedElts;
+    bool NeedsElt = true;
+    // If we know the index we are inserting to, clear it from Vec check.
+    if (CIdx && CIdx->ult(NumElts)) {
+      DemandedVecElts.clearBit(CIdx->getZExtValue());
+      NeedsElt = DemandedElts[CIdx->getZExtValue()];
+    }
+
+    // Do we demand the inserted element?
+    if (NeedsElt) {
+      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1);
+      // If we don't know any bits, early out.
+      if (Known.isUnknown())
+        break;
+    } else {
+      Known.KnownFPClasses = fcNone;
+    }
+
+    // Do we need anymore elements from Vec?
+    if (!DemandedVecElts.isZero()) {
+      KnownFPClass Known2;
+      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
+                          Depth + 1);
+      Known |= Known2;
+    }
+
+    break;
+  }
+  case TargetOpcode::G_SHUFFLE_VECTOR: {
+    // For undef elements, we don't know anything about the common state of
+    // the shuffle result.
+    GShuffleVector &Shuf = cast<GShuffleVector>(MI);
+    APInt DemandedLHS, DemandedRHS;
+    if (DstTy.isScalableVector()) {
+      assert(DemandedElts == APInt(1, 1));
+      DemandedLHS = DemandedRHS = DemandedElts;
+    } else {
+      if (!llvm::getShuffleDemandedElts(DstTy.getNumElements(), Shuf.getMask(),
+                                        DemandedElts, DemandedLHS,
+                                        DemandedRHS)) {
+        Known.resetAll();
+        return;
+      }
+    }
+
+    if (!!DemandedLHS) {
+      Register LHS = Shuf.getSrc1Reg();
+      computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
+                          Depth + 1);
+
+      // If we don't know any bits, early out.
+      if (Known.isUnknown())
+        break;
+    } else {
+      Known.KnownFPClasses = fcNone;
+    }
+
+    if (!!DemandedRHS) {
+      KnownFPClass Known2;
+      Register RHS = Shuf.getSrc2Reg();
+      computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
+                          Depth + 1);
+      Known |= Known2;
+    }
+    break;
+  }
+  case TargetOpcode::COPY: {
+    Register Src = MI.getOperand(1).getReg();
+    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1);
+    break;
+  }
+  }
+}
+
+KnownFPClass
+GISelValueTracking::computeKnownFPClass(Register R, const APInt &DemandedElts,
+                                        FPClassTest InterestedClasses,
+                                        unsigned Depth) {
+  KnownFPClass KnownClasses; // Return-by-value wrapper over the out-param form.
+  computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, Depth);
+  return KnownClasses;
+}
+
+KnownFPClass GISelValueTracking::computeKnownFPClass(
+    Register R, FPClassTest InterestedClasses, unsigned Depth) {
+  KnownFPClass Known; // Demands all lanes of R; see the DemandedElts overload.
+  computeKnownFPClass(R, Known, InterestedClasses, Depth);
+  return Known;
+}
+
+KnownFPClass GISelValueTracking::computeKnownFPClass(
+    Register R, const APInt &DemandedElts, uint32_t Flags,
+    FPClassTest InterestedClasses, unsigned Depth) {
+  if (Flags & MachineInstr::MIFlag::FmNoNans) // nnan: a NaN result is poison,
+    InterestedClasses &= ~fcNan;              // so don't bother computing it.
+  if (Flags & MachineInstr::MIFlag::FmNoInfs) // ninf: likewise for infinities.
+    InterestedClasses &= ~fcInf;
+
+  KnownFPClass Result =
+      computeKnownFPClass(R, DemandedElts, InterestedClasses, Depth);
+
+  if (Flags & MachineInstr::MIFlag::FmNoNans) // Also strip the poisoned
+    Result.KnownFPClasses &= ~fcNan;          // classes from the final answer.
+  if (Flags & MachineInstr::MIFlag::FmNoInfs)
+    Result.KnownFPClasses &= ~fcInf;
+  return Result;
+}
+
+KnownFPClass GISelValueTracking::computeKnownFPClass(
+    Register R, uint32_t Flags, FPClassTest InterestedClasses, unsigned Depth) {
+  LLT Ty = MRI.getType(R);
+  APInt DemandedElts = // Scalars are modelled as a single demanded "lane".
+      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
+  return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, Depth);
+}
+
 /// Compute number of sign bits for the intersection of \p Src0 and \p Src1
 unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0, Register Src1,
                                                    const APInt &DemandedElts,
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 75c9bbaec7603..22d0bc9914585 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3801,6 +3801,13 @@ void TargetLowering::computeKnownBitsForTargetInstr(
   Known.resetAll();
 }
 
+void TargetLowering::computeKnownFPClassForTargetInstr(
+    GISelValueTracking &Analysis, Register R, KnownFPClass &Known,
+    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
+    unsigned Depth) const {
+  Known.resetAll(); // Conservative default; targets override this to refine.
+}
+
 void TargetLowering::computeKnownBitsForFrameIndex(
   const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const {
   // The low bits are known zero if the pointer is aligned.

>From f22cdcffe401c0932eacd033abab3915e2ce782b Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Thu, 3 Apr 2025 12:48:27 +0000
Subject: [PATCH 2/3] add tests

---
 .../CodeGen/GlobalISel/CMakeLists.txt         |    1 +
 .../CodeGen/GlobalISel/KnownFPClassTest.cpp   | 1062 +++++++++++++++++
 2 files changed, 1063 insertions(+)
 create mode 100644 llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp

diff --git a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
index bce91c1ed6173..4ef6aff943f73 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
@@ -24,6 +24,7 @@ add_llvm_unittest(GlobalISelTests
   GISelMITest.cpp
   PatternMatchTest.cpp
   KnownBitsTest.cpp
+  KnownFPClassTest.cpp
   KnownBitsVectorTest.cpp
   GISelUtilsTest.cpp
   GISelAliasTest.cpp
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp
new file mode 100644
index 0000000000000..c094eee3b1fa9
--- /dev/null
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp
@@ -0,0 +1,1062 @@
+//===- KnownFPClassTest.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GISelMITest.h"
+#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "gtest/gtest.h"
+#include <optional>
+
+TEST_F(AArch64GISelMITest, TestFPClassCstPosZero) {
+  StringRef MIRString = "  %3:_(s32) = G_FCONSTANT float 0.0\n"
+                        "  %4:_(s32) = COPY %3\n";
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosZero, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCstNegZero) {
+  StringRef MIRString = "  %3:_(s32) = G_FCONSTANT float -0.0\n"
+                        "  %4:_(s32) = COPY %3\n";
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegZero, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassUndef) {
+  StringRef MIRString = R"(
+    %def:_(s32) = G_IMPLICIT_DEF
+    %copy_def:_(s32) = COPY %def
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcAllFlags, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCstVecNegZero) {
+  StringRef MIRString = R"(
+   %c0:_(s32) = G_FCONSTANT float -0.0
+   %c1:_(s32) = G_FCONSTANT float -0.0
+   %c2:_(s32) = G_FCONSTANT float -0.0
+   %vector:_(<3 x s32>) = G_BUILD_VECTOR %c0, %c1, %c2
+   %copy_vector:_(<3 x s32>) = COPY %vector
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegZero, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPos0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0.0
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosZero, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNeg0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float -0.0
+    %rhs:_(s32) = G_FCONSTANT float -0.0
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegZero, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPosOrNeg0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float -0.0
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcZero, Known.KnownFPClasses); // Union of -0 (lhs) and +0 (rhs).
+  EXPECT_EQ(std::nullopt, Known.SignBit);  // Sign differs between the arms.
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPosInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0x7FF0000000000000
+    %rhs:_(s32) = G_FCONSTANT float 0x7FF0000000000000
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosInf, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNegInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0xFFF0000000000000
+    %rhs:_(s32) = G_FCONSTANT float 0xFFF0000000000000
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPosOrNegInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0x7FF0000000000000
+    %rhs:_(s32) = G_FCONSTANT float 0xFFF0000000000000
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNNaN) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sel:_(s32) = nnan G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sel:_(s32) = ninf G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNNaNNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sel:_(s32) = nnan ninf G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~(fcNan | fcInf), Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFNegNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fneg:_(s32) = ninf G_FNEG %val
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFabsUnknown) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %copy_fabs:_(s32) = COPY %fabs
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassVecFabsUnknown) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(<3 x s32>) = G_LOAD %ptr(p0) :: (load (<3 x s32>))
+    %fabs:_(<3 x s32>) = G_FABS %val
+    %copy_fabs:_(<3 x s32>) = COPY %fabs
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFnegFabs) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %fneg:_(s32) = G_FNEG %fabs
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegative | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFnegFabsNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = ninf G_FABS %val
+    %fneg:_(s32) = G_FNEG %fabs
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ((fcNegative & ~fcNegInf) | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFnegFabsNNan) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan G_FABS %val
+    %fneg:_(s32) = G_FNEG %fabs
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // With nnan on the fabs, negating it leaves only the negative classes.
+  EXPECT_EQ(fcNegative, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopySignNNanSrc0) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %mag:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sgn:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan G_FABS %mag
+    %fcopysign:_(s32) = G_FCOPYSIGN %fabs, %sgn
+    %copy_fcopysign:_(s32) = COPY %fcopysign
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // The nnan magnitude excludes NaN from the result; the sign comes from
+  // an unconstrained load, so the sign bit is unknown.
+  EXPECT_EQ(~fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopySignNInfSrc0_NegSign) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %mag:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sgn:_(s32) = G_FCONSTANT float -1.0
+    %fabs:_(s32) = ninf G_FLOG %mag
+    %fcopysign:_(s32) = G_FCOPYSIGN %fabs, %sgn
+    %copy_fcopysign:_(s32) = COPY %fcopysign
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // A constant negative sign operand pins the sign bit; the ninf flag on
+  // the magnitude restricts the result to negative finite values or NaN.
+  EXPECT_EQ(fcNegFinite | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopySignNInfSrc0_PosSign) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %mag:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sgn:_(s32) = G_FCONSTANT float 1.0
+    %fabs:_(s32) = ninf G_FSQRT %mag
+    %fcopysign:_(s32) = G_FCOPYSIGN %fabs, %sgn
+    %copy_fcopysign:_(s32) = COPY %fcopysign
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // A constant positive sign operand clears the sign bit; the ninf flag on
+  // the magnitude restricts the result to positive finite values or NaN.
+  EXPECT_EQ(fcPosFinite | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassUIToFP) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %cast:_(s32) = G_UITOFP %val
+    %copy_cast:_(s32) = COPY %cast
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Unsigned int -> FP is non-negative, finite, and never subnormal.
+  EXPECT_EQ(fcPosFinite & ~fcSubnormal, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSIToFP) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %cast:_(s32) = G_SITOFP %val
+    %copy_cast:_(s32) = COPY %cast
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Signed int -> FP is finite, never -0, and never subnormal; the input
+  // sign is unknown, so the sign bit is unknown too.
+  EXPECT_EQ(fcFinite & ~fcNegZero & ~fcSubnormal, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFAdd) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fadd:_(s32) = G_FADD %lhs, %rhs
+    %copy_fadd:_(s32) = COPY %fadd
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Adding two completely unconstrained values tells us nothing.
+  EXPECT_EQ(fcAllFlags, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFMul) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fmul:_(s32) = G_FMUL %val, %val
+    %copy_fadd:_(s32) = COPY %fmul
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // x * x cannot be negative, but may still be NaN; the sign bit is not
+  // pinned because NaN payload signs are unconstrained.
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFMulZero) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %fabs:_(s32) = nnan ninf G_FABS %lhs
+    %fmul:_(s32) = G_FMUL %fabs, %rhs
+    %copy_fadd:_(s32) = COPY %fmul
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // nnan+ninf fabs times +0.0 can never produce a NaN, so the result is
+  // known positive with a clear sign bit.
+  EXPECT_EQ(fcPositive, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLogNeg) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan ninf G_FABS %val
+    %fneg:_(s32) = nnan ninf G_FNEG %fabs
+    %flog:_(s32) = G_FLOG %fneg
+    %copy_flog:_(s32) = COPY %flog
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // log of a non-positive input may be NaN (negative operand) or -inf
+  // (zero operand), but never +inf.
+  EXPECT_EQ(fcFinite | fcNan | fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLogPosZero) {
+  StringRef MIR = R"(
+    %val:_(s32) = G_FCONSTANT float 0.0
+    %flog:_(s32) = G_FLOG %val
+    %copy_flog:_(s32) = COPY %flog
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // log(+0.0) is -inf; NaN is ruled out since the input is a known
+  // non-negative constant.
+  EXPECT_EQ(fcFinite | fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLogNegZero) {
+  StringRef MIR = R"(
+    %val:_(s32) = G_FCONSTANT float -0.0
+    %flog:_(s32) = G_FLOG %val
+    %copy_flog:_(s32) = COPY %flog
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // log(-0.0) behaves like log(+0.0): -inf, with no NaN possible.
+  EXPECT_EQ(fcFinite | fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopy) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %copy:_(s32) = COPY %fabs
+    %copy_copy:_(s32) = COPY %copy
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // COPY is transparent: the fabs result's classes propagate through.
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectIsFPClass) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %cond:_(s1) = G_IS_FPCLASS %lhs, 96
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs 
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Test mask 96 selects %lhs only when it is a zero; the other arm is the
+  // +0.0 constant, so either way the select yields a zero. The zero's sign
+  // may differ between arms, so the sign bit is unknown.
+  EXPECT_EQ(fcZero, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLDExp) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %exp:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %fldexp:_(s32) = G_FLDEXP %fabs, %exp
+    %copy_fldexp:_(s32) = COPY %fldexp
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // ldexp scales the magnitude but preserves non-negativity of the fabs;
+  // NaN still propagates from the unconstrained load.
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFPowIEvenExp) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %pwr:_(s32) = G_CONSTANT i32 2
+    %fpowi:_(s32) = G_FPOWI %val, %pwr
+    %copy_fpowi:_(s32) = COPY %fpowi
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // An even integer exponent makes powi non-negative regardless of the
+  // base's sign; NaN may still come through.
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFPowIPos) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %pwr:_(s32) = G_CONSTANT i32 3
+    %fabs:_(s32) = nnan ninf G_FABS %val
+    %fpowi:_(s32) = G_FPOWI %fabs, %pwr
+    %copy_fpowi:_(s32) = COPY %fpowi
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // An odd exponent keeps the base's sign; with a non-negative base the
+  // result stays non-negative (or NaN).
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFDiv) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fdiv:_(s32) = G_FDIV %val, %val
+    %copy_fdiv:_(s32) = COPY %fdiv
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // x / x is 1.0 when well-defined and NaN otherwise (0/0, inf/inf, NaN
+  // input), so the result is a positive normal or NaN.
+  EXPECT_EQ(fcPosNormal | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFRem) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %frem:_(s32) = G_FREM %val, %val
+    %copy_frem:_(s32) = COPY %frem
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // x % x is zero when well-defined, and NaN in the invalid cases.
+  EXPECT_EQ(fcZero | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassShuffleVec) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
+    %fabs:_(<4 x s32>) = nnan ninf G_FABS %vec
+    %def:_(<4 x s32>) = G_IMPLICIT_DEF
+    %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %fabs(<4 x s32>), %def, shufflemask(0, 0, 0, 0)
+    %copy_shuf:_(<4 x s32>) = COPY %shuf
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // The mask only selects lanes from the nnan+ninf fabs operand, so every
+  // result element is a positive finite value.
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassBuildVec) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val1:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan ninf G_FABS %val1
+    %val2:_(s32) = G_FCONSTANT float 3.0
+    %vec:_(<2 x s32>) = G_BUILD_VECTOR %fabs, %val2
+    %copy_vec:_(<2 x s32>) = COPY %vec
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Both elements (nnan+ninf fabs, constant 3.0) are positive and finite,
+  // so the vector as a whole is known pos-finite.
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassConcatVec) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
+    %c1:_(s32) = G_FCONSTANT float 1.0
+    %c2:_(s32) = G_FCONSTANT float 2.0
+    %vec2:_(<2 x s32>) = G_BUILD_VECTOR %c1, %c2
+    %fabs1:_(<2 x s32>) = nnan ninf G_FABS %vec1
+    %fabs2:_(<2 x s32>) = nnan ninf G_FABS %vec2
+    %cat:_(<4 x s32>) = G_CONCAT_VECTORS %fabs1, %fabs2
+    %copy_cat:_(<4 x s32>) = COPY %cat
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Concatenation unions the operand classes; both halves come from
+  // nnan+ninf fabs, so the result is pos-finite throughout.
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassVecExtractElem) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
+    %fabs:_(<4 x s32>) = nnan ninf G_FABS %vec
+    %idx:_(s64) = G_CONSTANT i64 1
+    %extract:_(s32) = G_EXTRACT_VECTOR_ELT %fabs, %idx
+    %copy_elem:_(s32) = COPY %extract
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Any element extracted from the nnan+ninf fabs vector is pos-finite.
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassVecInsertElem) {
+  StringRef MIR = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
+    %fabs1:_(<4 x s32>) = nnan ninf G_FABS %vec
+    %elem:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs2:_(s32) = nnan ninf G_FABS %elem
+    %idx:_(s64) = G_CONSTANT i64 1
+    %insert:_(<4 x s32>) = G_INSERT_VECTOR_ELT %fabs1, %fabs2, %idx
+    %copy_insert:_(<4 x s32>) = COPY %insert
+)";
+
+  setUp(MIR);
+  if (!TM)
+    GTEST_SKIP();
+
+  // Query the register feeding the final COPY in the MIR above.
+  MachineInstr *DefMI = MRI->getVRegDef(Copies.back());
+  Register QueryReg = DefMI->getOperand(1).getReg();
+
+  GISelValueTracking VT(*MF);
+  KnownFPClass Known = VT.computeKnownFPClass(QueryReg);
+
+  // Both the base vector and the inserted scalar are nnan+ninf fabs
+  // results, so every lane of the result is pos-finite.
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}

>From 98a2decd277f396219285d90d6a52850b00b079f Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Thu, 22 May 2025 17:36:04 +0000
Subject: [PATCH 3/3] handle G_FMINIMUMNUM and G_FMAXIMUMNUM

---
 .../CodeGen/GlobalISel/GISelValueTracking.cpp | 39 +++++++++++++------
 1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
index 6d86c6f0837a6..67b1a449f8483 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
@@ -14,13 +14,13 @@
 #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
 #include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/FloatingPointMode.h"
-#include "llvm/CodeGen/GlobalISel/MachineFloatingPointPredicateUtils.h"
 #include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Analysis/VectorUtils.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/CodeGen/GlobalISel/MachineFloatingPointPredicateUtils.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/CodeGen/LowLevelTypeUtils.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -957,7 +957,9 @@ void GISelValueTracking::computeKnownFPClass(Register R,
   case TargetOpcode::G_FMINNUM_IEEE:
   case TargetOpcode::G_FMAXIMUM:
   case TargetOpcode::G_FMINIMUM:
-  case TargetOpcode::G_FMAXNUM_IEEE: {
+  case TargetOpcode::G_FMAXNUM_IEEE:
+  case TargetOpcode::G_FMAXIMUMNUM:
+  case TargetOpcode::G_FMINIMUMNUM: {
     Register LHS = MI.getOperand(1).getReg();
     Register RHS = MI.getOperand(2).getReg();
     KnownFPClass KnownLHS, KnownRHS;
@@ -972,10 +974,14 @@ void GISelValueTracking::computeKnownFPClass(Register R,
 
     // If either operand is not NaN, the result is not NaN.
     if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
-                     Opcode == TargetOpcode::G_FMAXNUM))
+                     Opcode == TargetOpcode::G_FMAXNUM ||
+                     Opcode == TargetOpcode::G_FMINIMUMNUM ||
+                     Opcode == TargetOpcode::G_FMAXIMUMNUM))
       Known.knownNot(fcNan);
 
-    if (Opcode == TargetOpcode::G_FMAXNUM) {
+    if (Opcode == TargetOpcode::G_FMAXNUM ||
+        Opcode == TargetOpcode::G_FMAXIMUMNUM ||
+        Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
       // If at least one operand is known to be positive, the result must be
       // positive.
       if ((KnownLHS.cannotBeOrderedLessThanZero() &&
@@ -989,7 +995,9 @@ void GISelValueTracking::computeKnownFPClass(Register R,
       if (KnownLHS.cannotBeOrderedLessThanZero() ||
           KnownRHS.cannotBeOrderedLessThanZero())
         Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
-    } else if (Opcode == TargetOpcode::G_FMINNUM) {
+    } else if (Opcode == TargetOpcode::G_FMINNUM ||
+               Opcode == TargetOpcode::G_FMINIMUMNUM ||
+               Opcode == TargetOpcode::G_FMINNUM_IEEE) {
       // If at least one operand is known to be negative, the result must be
       // negative.
       if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
@@ -997,16 +1005,14 @@ void GISelValueTracking::computeKnownFPClass(Register R,
           (KnownRHS.cannotBeOrderedGreaterThanZero() &&
            KnownRHS.isKnownNeverNaN()))
         Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
-    } else if (Opcode == TargetOpcode::G_FMINNUM_IEEE) {
-      // TODO:
-    } else if (Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
-      // TODO:
-    } else {
+    } else if (Opcode == TargetOpcode::G_FMINIMUM) {
       // If at least one operand is known to be negative, the result must be
       // negative.
       if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
           KnownRHS.cannotBeOrderedGreaterThanZero())
         Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+    } else {
+      llvm_unreachable("unhandled intrinsic");
     }
 
     // Fixup zero handling if denormals could be returned as a zero.
@@ -1032,16 +1038,25 @@ void GISelValueTracking::computeKnownFPClass(Register R,
           Known.signBitMustBeZero();
       } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
                   Opcode == TargetOpcode::G_FMINIMUM) ||
+                 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
+                 Opcode == TargetOpcode::G_FMINIMUMNUM ||
+                 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
+                 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
+                 // FIXME: Should be using logical zero versions
                  ((KnownLHS.isKnownNeverNegZero() ||
                    KnownRHS.isKnownNeverPosZero()) &&
                   (KnownLHS.isKnownNeverPosZero() ||
                    KnownRHS.isKnownNeverNegZero()))) {
         if ((Opcode == TargetOpcode::G_FMAXIMUM ||
-             Opcode == TargetOpcode::G_FMAXNUM) &&
+             Opcode == TargetOpcode::G_FMAXNUM ||
+             Opcode == TargetOpcode::G_FMAXIMUMNUM ||
+             Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
             (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
           Known.signBitMustBeZero();
         else if ((Opcode == TargetOpcode::G_FMINIMUM ||
-                  Opcode == TargetOpcode::G_FMINNUM) &&
+                  Opcode == TargetOpcode::G_FMINNUM ||
+                  Opcode == TargetOpcode::G_FMINIMUMNUM ||
+                  Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
                  (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
           Known.signBitMustBeOne();
       }



More information about the llvm-commits mailing list