[llvm] [GISel] Add KnownFPClass Analysis to GISelValueTrackingPass (PR #134611)

Tim Gymnich via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 7 08:21:41 PDT 2025


https://github.com/tgymnich updated https://github.com/llvm/llvm-project/pull/134611

From 0245bc8db34534140d95fae989d280fd453bd87c Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 21 Mar 2025 16:52:17 +0000
Subject: [PATCH 1/6] [GlobalISel] Add computeFPClass to GlobalISelValueTracking

---
 .../CodeGen/GlobalISel/GISelValueTracking.h   |   89 +
 .../llvm/CodeGen/GlobalISel/MIPatternMatch.h  |   35 +
 llvm/include/llvm/CodeGen/GlobalISel/Utils.h  |   35 +
 llvm/include/llvm/CodeGen/TargetLowering.h    |    8 +
 llvm/lib/Analysis/ValueTracking.cpp           |    1 +
 .../CodeGen/GlobalISel/GISelValueTracking.cpp | 1468 ++++++++++++++++-
 llvm/lib/CodeGen/GlobalISel/Utils.cpp         |   23 +
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |    7 +
 8 files changed, 1662 insertions(+), 4 deletions(-)
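
The bulk of the change is new computeKnownFPClass entry points on
GISelValueTracking, mirroring the IR-level analysis in ValueTracking. As a
rough sketch of how a combine might consume the result (illustrative only:
VT, MIB, DstReg and SrcReg are placeholders assumed to be in scope), a G_FABS
whose input already has a known-clear sign bit could be folded away:

  KnownFPClass Known = VT.computeKnownFPClass(SrcReg);
  if (Known.SignBit == false)       // sign bit proven zero
    MIB.buildCopy(DstReg, SrcReg);  // the G_FABS would be a no-op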

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h b/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
index aa99bf321d2b1..1ae3b173d95ce 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
@@ -14,12 +14,15 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
 #define LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
 
+#include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/Register.h"
+#include "llvm/IR/InstrTypes.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/KnownBits.h"
+#include "llvm/Support/KnownFPClass.h"
 
 namespace llvm {
 
@@ -41,6 +44,64 @@ class GISelValueTracking : public GISelChangeObserver {
   unsigned computeNumSignBitsMin(Register Src0, Register Src1,
                                  const APInt &DemandedElts, unsigned Depth = 0);
 
+  /// Returns a pair of values that, when passed to llvm.is.fpclass, give the
+  /// same result as an fcmp with the given operands.
+  ///
+  /// If \p LookThroughSrc is true, consider the input value when computing the
+  /// mask.
+  ///
+  /// If \p LookThroughSrc is false, ignore the source value (i.e. the first
+  /// pair element will always be LHS).
+  std::pair<Register, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
+                                                   const MachineFunction &MF,
+                                                   Register LHS, Value *RHS,
+                                                   bool LookThroughSrc = true);
+  std::pair<Register, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
+                                                   const MachineFunction &MF,
+                                                   Register LHS,
+                                                   const APFloat *ConstRHS,
+                                                   bool LookThroughSrc = true);
+
+  /// Compute the possible floating-point classes that \p LHS could be based on
+  /// fcmp \p Pred \p LHS, \p RHS.
+  ///
+  /// \returns { TestedValue, ClassesIfTrue, ClassesIfFalse }
+  ///
+  /// If the compare returns an exact class test, ClassesIfTrue ==
+  /// ~ClassesIfFalse
+  ///
+  /// This is a less exact version of fcmpToClassTest (e.g. fcmpToClassTest will
+  /// only succeed for a test of x > 0 implies positive, but not x > 1).
+  ///
+  /// If \p LookThroughSrc is true, consider the input value when computing the
+  /// mask. This may look through sign bit operations.
+  ///
+  /// If \p LookThroughSrc is false, ignore the source value (i.e. the first
+  /// pair element will always be LHS).
+  ///
+  std::tuple<Register, FPClassTest, FPClassTest>
+  fcmpImpliesClass(CmpInst::Predicate Pred, const MachineFunction &MF,
+                   Register LHS, Register RHS, bool LookThroughSrc = true);
+  std::tuple<Register, FPClassTest, FPClassTest>
+  fcmpImpliesClass(CmpInst::Predicate Pred, const MachineFunction &MF,
+                   Register LHS, FPClassTest RHS, bool LookThroughSrc = true);
+  std::tuple<Register, FPClassTest, FPClassTest>
+  fcmpImpliesClass(CmpInst::Predicate Pred, const MachineFunction &MF,
+                   Register LHS, const APFloat &RHS,
+                   bool LookThroughSrc = true);
+
+  void computeKnownFPClass(Register R, KnownFPClass &Known,
+                           FPClassTest InterestedClasses, unsigned Depth);
+
+  void computeKnownFPClassForFPTrunc(const MachineInstr &MI,
+                                     const APInt &DemandedElts,
+                                     FPClassTest InterestedClasses,
+                                     KnownFPClass &Known, unsigned Depth);
+
+  void computeKnownFPClass(Register R, const APInt &DemandedElts,
+                           FPClassTest InterestedClasses, KnownFPClass &Known,
+                           unsigned Depth);
+
 public:
   GISelValueTracking(MachineFunction &MF, unsigned MaxDepth = 6);
   virtual ~GISelValueTracking() = default;
@@ -86,6 +147,34 @@ class GISelValueTracking : public GISelChangeObserver {
   /// \return The known alignment for the pointer-like value \p R.
   Align computeKnownAlignment(Register R, unsigned Depth = 0);
 
+  /// Determine which floating-point classes are valid for \p R, and return them
+  /// in KnownFPClass bit sets.
+  ///
+  /// This function is defined on values with floating-point type, on vectors
+  /// of floating-point type, and on arrays of floating-point type.
+
+  /// \p InterestedClasses is a compile time optimization hint for which
+  /// floating point classes should be queried. Queries not specified in \p
+  /// InterestedClasses should be reliable if they are determined during the
+  /// query.
+  KnownFPClass computeKnownFPClass(Register R, const APInt &DemandedElts,
+                                   FPClassTest InterestedClasses,
+                                   unsigned Depth);
+
+  KnownFPClass computeKnownFPClass(Register R,
+                                   FPClassTest InterestedClasses = fcAllFlags,
+                                   unsigned Depth = 0);
+
+  /// Wrapper to account for known fast math flags at the use instruction.
+  KnownFPClass computeKnownFPClass(Register R, const APInt &DemandedElts,
+                                   uint32_t Flags,
+                                   FPClassTest InterestedClasses,
+                                   unsigned Depth);
+
+  KnownFPClass computeKnownFPClass(Register R, uint32_t Flags,
+                                   FPClassTest InterestedClasses,
+                                   unsigned Depth);
+
   // Observer API. No-op for non-caching implementation.
   void erasingInstr(MachineInstr &MI) override {}
   void createdInstr(MachineInstr &MI) override {}
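
As a concrete reading of the fcmpToClassTest contract documented above: an
ordered x > 0.0 compare is an exact class test (sketch only; VT, MF and X are
assumed to be in scope, and IEEE input denormal handling is assumed for the
type):

  APFloat Zero = APFloat::getZero(APFloat::IEEEsingle());
  auto [Val, Mask] = VT.fcmpToClassTest(CmpInst::FCMP_OGT, MF, X, &Zero);
  // Val == X and Mask == fcPosSubnormal | fcPosNormal | fcPosInf, i.e. the
  // mask an equivalent llvm.is.fpclass test would use.
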
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 72483fbea5805..5387a88f385c1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -14,6 +14,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_MIPATTERNMATCH_H
 
 #include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FloatingPointMode.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/IR/InstrTypes.h"
@@ -393,6 +394,7 @@ inline bind_ty<const MachineInstr *> m_MInstr(const MachineInstr *&MI) {
 inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
 inline bind_ty<CmpInst::Predicate> m_Pred(CmpInst::Predicate &P) { return P; }
 inline operand_type_match m_Pred() { return operand_type_match(); }
+inline bind_ty<FPClassTest> m_FPClassTest(FPClassTest &T) { return T; }
 
 template <typename BindTy> struct deferred_helper {
   static bool match(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
@@ -762,6 +764,32 @@ struct CompareOp_match {
   }
 };
 
+template <typename LHS_P, typename Test_P, unsigned Opcode>
+struct ClassifyOp_match {
+  LHS_P L;
+  Test_P T;
+
+  ClassifyOp_match(const LHS_P &LHS, const Test_P &Tst) : L(LHS), T(Tst) {}
+
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
+      return false;
+
+    Register LHS = TmpMI->getOperand(1).getReg();
+    if (!L.match(MRI, LHS))
+      return false;
+
+    FPClassTest TmpClass =
+        static_cast<FPClassTest>(TmpMI->getOperand(2).getImm());
+    return T.match(MRI, TmpClass);
+  }
+};
+
 template <typename Pred, typename LHS, typename RHS>
 inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>
 m_GICmp(const Pred &P, const LHS &L, const RHS &R) {
@@ -804,6 +832,13 @@ m_c_GFCmp(const Pred &P, const LHS &L, const RHS &R) {
   return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP, true>(P, L, R);
 }
 
+/// Matches a floating point class test
+template <typename LHS, typename Test>
+inline ClassifyOp_match<LHS, Test, TargetOpcode::G_IS_FPCLASS>
+m_GIsFPClass(const LHS &L, const Test &T) {
+  return ClassifyOp_match<LHS, Test, TargetOpcode::G_IS_FPCLASS>(L, T);
+}
+
 // Helper for checking if a Reg is of specific type.
 struct CheckType {
   LLT Ty;
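
The new matcher composes with mi_match like the existing compare matchers do
(sketch; Reg and MRI are assumed to be in scope):

  Register X;
  FPClassTest Mask;
  // Matches "%c = G_IS_FPCLASS %x, <test>" and captures both operands.
  if (mi_match(Reg, MRI, m_GIsFPClass(m_Reg(X), m_FPClassTest(Mask)))) {
    // X is the tested value; Mask is the immediate class-test mask.
  }
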
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 44141844f42f4..f6101d5d589d2 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -183,6 +183,10 @@ std::optional<APInt> getIConstantVRegVal(Register VReg,
 std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                                const MachineRegisterInfo &MRI);
 
+/// If \p VReg is defined by a G_CONSTANT whose value fits in a uint64_t,
+/// returns it.
+std::optional<uint64_t> getIConstantVRegZExtVal(Register VReg,
+                                                const MachineRegisterInfo &MRI);
+
 /// \p VReg is defined by a G_CONSTANT, return the corresponding value.
 const APInt &getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI);
 
@@ -438,6 +442,17 @@ std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
 std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                 const MachineRegisterInfo &MRI);
 
+/// \returns the scalar zero extended integral splat value of \p Reg if
+/// possible.
+std::optional<uint64_t>
+getIConstantSplatZExtVal(const Register Reg, const MachineRegisterInfo &MRI);
+
+/// \returns the scalar zero extended integral splat value defined by \p MI if
+/// possible.
+std::optional<uint64_t>
+getIConstantSplatZExtVal(const MachineInstr &MI,
+                         const MachineRegisterInfo &MRI);
+
 /// Returns a floating point scalar constant of a build vector splat if it
 /// exists. When \p AllowUndef == true some elements can be undef but not all.
 std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
@@ -654,6 +669,9 @@ class GIConstant {
 /// }
 /// provides low-level access.
 class GFConstant {
+  using VecTy = SmallVector<APFloat>;
+  using const_iterator = VecTy::const_iterator;
+
 public:
   enum class GFConstantKind { Scalar, FixedVector, ScalableVector };
 
@@ -671,6 +689,23 @@ class GFConstant {
   /// Returns the kind of this constant, e.g., Scalar.
   GFConstantKind getKind() const { return Kind; }
 
+  const_iterator begin() const {
+    assert(Kind != GFConstantKind::ScalableVector &&
+           "Expected fixed vector or scalar constant");
+    return Values.begin();
+  }
+
+  const_iterator end() const {
+    assert(Kind != GFConstantKind::ScalableVector &&
+           "Expected fixed vector or scalar constant");
+    return Values.end();
+  }
+
+  size_t size() const {
+    assert(Kind == GFConstantKind::FixedVector && "Expected fixed vector");
+    return Values.size();
+  }
+
   /// Returns the value, if this constant is a scalar.
   APFloat getScalarValue() const;
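
The new iterators make elementwise queries over a constant straightforward;
this is essentially what the FixedVector case of computeKnownFPClass in this
patch does (sketch; Reg and MRI are assumed to be in scope, and the constant
is assumed scalar or fixed vector since begin()/end() assert on scalable
vectors):

  FPClassTest Classes = fcNone;
  if (auto Cst = GFConstant::getConstant(Reg, MRI))
    for (const APFloat &Elt : *Cst)
      Classes |= Elt.classify();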
 
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 16066226f1896..f339344704f34 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -51,6 +51,7 @@
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownFPClass.h"
 #include <algorithm>
 #include <cassert>
 #include <climits>
@@ -4165,6 +4166,13 @@ class TargetLowering : public TargetLoweringBase {
                                               const MachineRegisterInfo &MRI,
                                               unsigned Depth = 0) const;
 
+  virtual void computeKnownFPClassForTargetInstr(GISelValueTracking &Analysis,
+                                                 Register R,
+                                                 KnownFPClass &Known,
+                                                 const APInt &DemandedElts,
+                                                 const MachineRegisterInfo &MRI,
+                                                 unsigned Depth = 0) const;
+
   /// Determine the known alignment for the pointer value \p R. This can
   /// typically be inferred from the number of low known 0 bits. However, for a
   /// pointer with a non-integral address space, the alignment value may be
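
Targets can refine the analysis for target-specific instructions by overriding
this hook, mirroring computeKnownBitsForTargetInstr. A hypothetical override
(MyTarget and FMED3_NO_NAN are placeholder names, not part of this patch):

  void MyTargetLowering::computeKnownFPClassForTargetInstr(
      GISelValueTracking &Analysis, Register R, KnownFPClass &Known,
      const APInt &DemandedElts, const MachineRegisterInfo &MRI,
      unsigned Depth) const {
    const MachineInstr *MI = MRI.getVRegDef(R);
    // Suppose the target documents this instruction as never producing NaN.
    if (MI->getOpcode() == MyTarget::FMED3_NO_NAN)
      Known.knownNot(fcNan);
  }
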
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 3b0249f91d6d7..3d9c8afed0de0 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -15,6 +15,7 @@
 #include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FloatingPointMode.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/SmallPtrSet.h"
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
index 12fe28b29e5c8..9428d1d1babf6 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
@@ -12,20 +12,34 @@
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/LowLevelTypeUtils.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/FMF.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/KnownFPClass.h"
 #include "llvm/Target/TargetMachine.h"
 
 #define DEBUG_TYPE "gisel-known-bits"
 
 using namespace llvm;
+using namespace MIPatternMatch;
 
 char llvm::GISelValueTrackingAnalysis::ID = 0;
 
@@ -100,8 +114,9 @@ APInt GISelValueTracking::getKnownOnes(Register R) {
   return getKnownBits(R).One;
 }
 
-LLVM_ATTRIBUTE_UNUSED static void
-dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
+LLVM_ATTRIBUTE_UNUSED static void dumpKnownBitsResult(const MachineInstr &MI,
+                                                      const KnownBits &Known,
+                                                      unsigned Depth) {
   dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth
          << "] Computed for: " << MI << "[" << Depth << "] Known: 0x"
          << toString(Known.Zero | Known.One, 16, false) << "\n"
@@ -111,6 +126,15 @@ dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
          << "\n";
 }
 
+LLVM_ATTRIBUTE_UNUSED static void
+dumpKnownFPClassResult(const MachineInstr &MI, const KnownFPClass &Known,
+                       unsigned Depth) {
+  dbgs() << "[" << Depth << "] Compute known FP class: " << MI << "[" << Depth
+         << "] Computed for: " << MI << "[" << Depth
+         << "] KnownFPClasses: " << Known.KnownFPClasses << "\n"
+         << "[" << Depth << "] SignBit: " << Known.SignBit << "\n";
+}
+
 /// Compute known bits for the intersection of \p Src0 and \p Src1
 void GISelValueTracking::computeKnownBitsMin(Register Src0, Register Src1,
                                              KnownBits &Known,
@@ -175,7 +199,7 @@ void GISelValueTracking::computeKnownBitsImpl(Register R, KnownBits &Known,
   if (CacheEntry != ComputeKnownBitsCache.end()) {
     Known = CacheEntry->second;
     LLVM_DEBUG(dbgs() << "Cache hit at ");
-    LLVM_DEBUG(dumpResult(MI, Known, Depth));
+    LLVM_DEBUG(dumpKnownBitsResult(MI, Known, Depth));
     assert(Known.getBitWidth() == BitWidth && "Cache entry size doesn't match");
     return;
   }
@@ -631,12 +655,1448 @@ void GISelValueTracking::computeKnownBitsImpl(Register R, KnownBits &Known,
   }
   }
 
-  LLVM_DEBUG(dumpResult(MI, Known, Depth));
+  LLVM_DEBUG(dumpKnownBitsResult(MI, Known, Depth));
 
   // Update the cache.
   ComputeKnownBitsCache[R] = Known;
 }
 
+/// Return true if it's possible to assume IEEE treatment of input denormals in
+/// \p MF for \p Ty.
+static bool inputDenormalIsIEEE(const MachineFunction &MF, LLT Ty) {
+  Ty = Ty.getScalarType();
+  return MF.getDenormalMode(getFltSemanticForLLT(Ty)).Input ==
+         DenormalMode::IEEE;
+}
+
+static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty) {
+  Ty = Ty.getScalarType();
+  DenormalMode Mode = MF.getDenormalMode(getFltSemanticForLLT(Ty));
+  return Mode.Output == DenormalMode::IEEE ||
+         Mode.Output == DenormalMode::PositiveZero;
+}
+
+std::pair<Register, FPClassTest> GISelValueTracking::fcmpToClassTest(
+    FCmpInst::Predicate Pred, const MachineFunction &MF, Register LHS,
+    const APFloat *ConstRHS, bool LookThroughSrc) {
+
+  auto [Src, ClassIfTrue, ClassIfFalse] =
+      fcmpImpliesClass(Pred, MF, LHS, *ConstRHS, LookThroughSrc);
+  if (Src && ClassIfTrue == ~ClassIfFalse)
+    return {Src, ClassIfTrue};
+
+  return {Register(), fcAllFlags};
+}
+
+/// Return the return value for fcmpImpliesClass for a compare that produces an
+/// exact class test.
+static std::tuple<Register, FPClassTest, FPClassTest>
+exactClass(Register V, FPClassTest M) {
+  return {V, M, ~M};
+}
+
+std::tuple<Register, FPClassTest, FPClassTest>
+GISelValueTracking::fcmpImpliesClass(CmpInst::Predicate Pred,
+                                     const MachineFunction &MF, Register LHS,
+                                     FPClassTest RHSClass,
+                                     bool LookThroughSrc) {
+  assert(RHSClass != fcNone);
+  Register Src = LHS;
+
+  if (Pred == FCmpInst::FCMP_TRUE)
+    return exactClass(Src, fcAllFlags);
+
+  if (Pred == FCmpInst::FCMP_FALSE)
+    return exactClass(Src, fcNone);
+
+  const FPClassTest OrigClass = RHSClass;
+
+  const bool IsNegativeRHS = (RHSClass & fcNegative) == RHSClass;
+  const bool IsPositiveRHS = (RHSClass & fcPositive) == RHSClass;
+  const bool IsNaN = (RHSClass & ~fcNan) == fcNone;
+
+  if (IsNaN) {
+    // fcmp o__ x, nan -> false
+    // fcmp u__ x, nan -> true
+    return exactClass(Src, CmpInst::isOrdered(Pred) ? fcNone : fcAllFlags);
+  }
+
+  // fcmp ord x, zero|normal|subnormal|inf -> ~fcNan
+  if (Pred == FCmpInst::FCMP_ORD)
+    return exactClass(Src, ~fcNan);
+
+  // fcmp uno x, zero|normal|subnormal|inf -> fcNan
+  if (Pred == FCmpInst::FCMP_UNO)
+    return exactClass(Src, fcNan);
+
+  const bool IsFabs = LookThroughSrc && mi_match(LHS, MRI, m_GFabs(m_Reg(Src)));
+  if (IsFabs)
+    RHSClass = llvm::inverse_fabs(RHSClass);
+
+  const bool IsZero = (OrigClass & fcZero) == OrigClass;
+  if (IsZero) {
+    assert(Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO);
+    // Compares with fcNone are only exactly equal to fcZero if input denormals
+    // are not flushed.
+    // TODO: Handle DAZ by expanding masks to cover subnormal cases.
+    if (!inputDenormalIsIEEE(MF, MRI.getType(LHS)))
+      return {Register(), fcAllFlags, fcAllFlags};
+
+    switch (Pred) {
+    case FCmpInst::FCMP_OEQ: // Match x == 0.0
+      return exactClass(Src, fcZero);
+    case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0)
+      return exactClass(Src, fcZero | fcNan);
+    case FCmpInst::FCMP_UNE: // Match (x != 0.0)
+      return exactClass(Src, ~fcZero);
+    case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0
+      return exactClass(Src, ~fcNan & ~fcZero);
+    case FCmpInst::FCMP_ORD:
+      // Canonical form of ord/uno is with a zero. We could also handle
+      // non-canonical other non-NaN constants or LHS == RHS.
+      return exactClass(Src, ~fcNan);
+    case FCmpInst::FCMP_UNO:
+      return exactClass(Src, fcNan);
+    case FCmpInst::FCMP_OGT: // x > 0
+      return exactClass(Src, fcPosSubnormal | fcPosNormal | fcPosInf);
+    case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
+      return exactClass(Src, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan);
+    case FCmpInst::FCMP_OGE: // x >= 0
+      return exactClass(Src, fcPositive | fcNegZero);
+    case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
+      return exactClass(Src, fcPositive | fcNegZero | fcNan);
+    case FCmpInst::FCMP_OLT: // x < 0
+      return exactClass(Src, fcNegSubnormal | fcNegNormal | fcNegInf);
+    case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
+      return exactClass(Src, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan);
+    case FCmpInst::FCMP_OLE: // x <= 0
+      return exactClass(Src, fcNegative | fcPosZero);
+    case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
+      return exactClass(Src, fcNegative | fcPosZero | fcNan);
+    default:
+      llvm_unreachable("all compare types are handled");
+    }
+
+    return {Register(), fcAllFlags, fcAllFlags};
+  }
+
+  const bool IsDenormalRHS = (OrigClass & fcSubnormal) == OrigClass;
+
+  const bool IsInf = (OrigClass & fcInf) == OrigClass;
+  if (IsInf) {
+    FPClassTest Mask = fcAllFlags;
+
+    switch (Pred) {
+    case FCmpInst::FCMP_OEQ:
+    case FCmpInst::FCMP_UNE: {
+      // Match __builtin_isinf patterns
+      //
+      //   fcmp oeq x, +inf -> is_fpclass x, fcPosInf
+      //   fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf
+      //   fcmp oeq x, -inf -> is_fpclass x, fcNegInf
+      //   fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false
+      //
+      //   fcmp une x, +inf -> is_fpclass x, ~fcPosInf
+      //   fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf
+      //   fcmp une x, -inf -> is_fpclass x, ~fcNegInf
+      //   fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true
+      if (IsNegativeRHS) {
+        Mask = fcNegInf;
+        if (IsFabs)
+          Mask = fcNone;
+      } else {
+        Mask = fcPosInf;
+        if (IsFabs)
+          Mask |= fcNegInf;
+      }
+      break;
+    }
+    case FCmpInst::FCMP_ONE:
+    case FCmpInst::FCMP_UEQ: {
+      // Match __builtin_isinf patterns
+      //   fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan
+      //   fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan
+      //   fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan
+      //   fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan
+      //
+      //   fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan
+      //   fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan
+      //   fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan
+      //   fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan
+      if (IsNegativeRHS) {
+        Mask = ~fcNegInf & ~fcNan;
+        if (IsFabs)
+          Mask = ~fcNan;
+      } else {
+        Mask = ~fcPosInf & ~fcNan;
+        if (IsFabs)
+          Mask &= ~fcNegInf;
+      }
+
+      break;
+    }
+    case FCmpInst::FCMP_OLT:
+    case FCmpInst::FCMP_UGE: {
+      if (IsNegativeRHS) {
+        // No value is ordered and less than negative infinity.
+        // All values are unordered with or at least negative infinity.
+        // fcmp olt x, -inf -> false
+        // fcmp uge x, -inf -> true
+        Mask = fcNone;
+        break;
+      }
+
+      // fcmp olt fabs(x), +inf -> fcFinite
+      // fcmp uge fabs(x), +inf -> ~fcFinite
+      // fcmp olt x, +inf -> fcFinite|fcNegInf
+      // fcmp uge x, +inf -> ~(fcFinite|fcNegInf)
+      Mask = fcFinite;
+      if (!IsFabs)
+        Mask |= fcNegInf;
+      break;
+    }
+    case FCmpInst::FCMP_OGE:
+    case FCmpInst::FCMP_ULT: {
+      if (IsNegativeRHS) {
+        // fcmp oge x, -inf -> ~fcNan
+        // fcmp oge fabs(x), -inf -> ~fcNan
+        // fcmp ult x, -inf -> fcNan
+        // fcmp ult fabs(x), -inf -> fcNan
+        Mask = ~fcNan;
+        break;
+      }
+
+      // fcmp oge fabs(x), +inf -> fcInf
+      // fcmp oge x, +inf -> fcPosInf
+      // fcmp ult fabs(x), +inf -> ~fcInf
+      // fcmp ult x, +inf -> ~fcPosInf
+      Mask = fcPosInf;
+      if (IsFabs)
+        Mask |= fcNegInf;
+      break;
+    }
+    case FCmpInst::FCMP_OGT:
+    case FCmpInst::FCMP_ULE: {
+      if (IsNegativeRHS) {
+        // fcmp ogt x, -inf -> fcmp one x, -inf
+        // fcmp ogt fabs(x), -inf -> fcmp ord x, x
+        // fcmp ule x, -inf -> fcmp ueq x, -inf
+        // fcmp ule fabs(x), -inf -> fcmp uno x, x
+        Mask = IsFabs ? ~fcNan : ~(fcNegInf | fcNan);
+        break;
+      }
+
+      // No value is ordered and greater than infinity.
+      Mask = fcNone;
+      break;
+    }
+    case FCmpInst::FCMP_OLE:
+    case FCmpInst::FCMP_UGT: {
+      if (IsNegativeRHS) {
+        Mask = IsFabs ? fcNone : fcNegInf;
+        break;
+      }
+
+      // fcmp ole x, +inf -> fcmp ord x, x
+      // fcmp ole fabs(x), +inf -> fcmp ord x, x
+      // fcmp ole x, -inf -> fcmp oeq x, -inf
+      // fcmp ole fabs(x), -inf -> false
+      Mask = ~fcNan;
+      break;
+    }
+    default:
+      llvm_unreachable("all compare types are handled");
+    }
+
+    // Invert the comparison for the unordered cases.
+    if (FCmpInst::isUnordered(Pred))
+      Mask = ~Mask;
+
+    return exactClass(Src, Mask);
+  }
+
+  if (Pred == FCmpInst::FCMP_OEQ)
+    return {Src, RHSClass, fcAllFlags};
+
+  if (Pred == FCmpInst::FCMP_UEQ) {
+    FPClassTest Class = RHSClass | fcNan;
+    return {Src, Class, ~fcNan};
+  }
+
+  if (Pred == FCmpInst::FCMP_ONE)
+    return {Src, ~fcNan, RHSClass | fcNan};
+
+  if (Pred == FCmpInst::FCMP_UNE)
+    return {Src, fcAllFlags, RHSClass};
+
+  assert((RHSClass == fcNone || RHSClass == fcPosNormal ||
+          RHSClass == fcNegNormal || RHSClass == fcNormal ||
+          RHSClass == fcPosSubnormal || RHSClass == fcNegSubnormal ||
+          RHSClass == fcSubnormal) &&
+         "should have been recognized as an exact class test");
+
+  if (IsNegativeRHS) {
+    // TODO: Handle fneg(fabs)
+    if (IsFabs) {
+      // fabs(x) o> -k -> fcmp ord x, x
+      // fabs(x) u> -k -> true
+      // fabs(x) o< -k -> false
+      // fabs(x) u< -k -> fcmp uno x, x
+      switch (Pred) {
+      case FCmpInst::FCMP_OGT:
+      case FCmpInst::FCMP_OGE:
+        return {Src, ~fcNan, fcNan};
+      case FCmpInst::FCMP_UGT:
+      case FCmpInst::FCMP_UGE:
+        return {Src, fcAllFlags, fcNone};
+      case FCmpInst::FCMP_OLT:
+      case FCmpInst::FCMP_OLE:
+        return {Src, fcNone, fcAllFlags};
+      case FCmpInst::FCMP_ULT:
+      case FCmpInst::FCMP_ULE:
+        return {Src, fcNan, ~fcNan};
+      default:
+        break;
+      }
+
+      return {Register(), fcAllFlags, fcAllFlags};
+    }
+
+    FPClassTest ClassesLE = fcNegInf | fcNegNormal;
+    FPClassTest ClassesGE = fcPositive | fcNegZero | fcNegSubnormal;
+
+    if (IsDenormalRHS)
+      ClassesLE |= fcNegSubnormal;
+    else
+      ClassesGE |= fcNegNormal;
+
+    switch (Pred) {
+    case FCmpInst::FCMP_OGT:
+    case FCmpInst::FCMP_OGE:
+      return {Src, ClassesGE, ~ClassesGE | RHSClass};
+    case FCmpInst::FCMP_UGT:
+    case FCmpInst::FCMP_UGE:
+      return {Src, ClassesGE | fcNan, ~(ClassesGE | fcNan) | RHSClass};
+    case FCmpInst::FCMP_OLT:
+    case FCmpInst::FCMP_OLE:
+      return {Src, ClassesLE, ~ClassesLE | RHSClass};
+    case FCmpInst::FCMP_ULT:
+    case FCmpInst::FCMP_ULE:
+      return {Src, ClassesLE | fcNan, ~(ClassesLE | fcNan) | RHSClass};
+    default:
+      break;
+    }
+  } else if (IsPositiveRHS) {
+    FPClassTest ClassesGE = fcPosNormal | fcPosInf;
+    FPClassTest ClassesLE = fcNegative | fcPosZero | fcPosSubnormal;
+    if (IsDenormalRHS)
+      ClassesGE |= fcPosSubnormal;
+    else
+      ClassesLE |= fcPosNormal;
+
+    if (IsFabs) {
+      ClassesGE = llvm::inverse_fabs(ClassesGE);
+      ClassesLE = llvm::inverse_fabs(ClassesLE);
+    }
+
+    switch (Pred) {
+    case FCmpInst::FCMP_OGT:
+    case FCmpInst::FCMP_OGE:
+      return {Src, ClassesGE, ~ClassesGE | RHSClass};
+    case FCmpInst::FCMP_UGT:
+    case FCmpInst::FCMP_UGE:
+      return {Src, ClassesGE | fcNan, ~(ClassesGE | fcNan) | RHSClass};
+    case FCmpInst::FCMP_OLT:
+    case FCmpInst::FCMP_OLE:
+      return {Src, ClassesLE, ~ClassesLE | RHSClass};
+    case FCmpInst::FCMP_ULT:
+    case FCmpInst::FCMP_ULE:
+      return {Src, ClassesLE | fcNan, ~(ClassesLE | fcNan) | RHSClass};
+    default:
+      break;
+    }
+  }
+
+  return {Register(), fcAllFlags, fcAllFlags};
+}
+
+std::tuple<Register, FPClassTest, FPClassTest>
+GISelValueTracking::fcmpImpliesClass(CmpInst::Predicate Pred,
+                                     const MachineFunction &MF, Register LHS,
+                                     const APFloat &ConstRHS,
+                                     bool LookThroughSrc) {
+  // We can refine checks against smallest normal / largest denormal to an
+  // exact class test.
+  if (!ConstRHS.isNegative() && ConstRHS.isSmallestNormalized()) {
+    Register Src = LHS;
+    const bool IsFabs =
+        LookThroughSrc && mi_match(LHS, MRI, m_GFabs(m_Reg(Src)));
+
+    FPClassTest Mask;
+    // Match pattern that's used in __builtin_isnormal.
+    switch (Pred) {
+    case FCmpInst::FCMP_OLT:
+    case FCmpInst::FCMP_UGE: {
+      // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero
+      // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero
+      // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf
+      // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero)
+      Mask = fcZero | fcSubnormal;
+      if (!IsFabs)
+        Mask |= fcNegNormal | fcNegInf;
+
+      break;
+    }
+    case FCmpInst::FCMP_OGE:
+    case FCmpInst::FCMP_ULT: {
+      // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf
+      // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal
+      // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf)
+      // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal)
+      Mask = fcPosInf | fcPosNormal;
+      if (IsFabs)
+        Mask |= fcNegInf | fcNegNormal;
+      break;
+    }
+    default:
+      return fcmpImpliesClass(Pred, MF, LHS, ConstRHS.classify(),
+                              LookThroughSrc);
+    }
+
+    // Invert the comparison for the unordered cases.
+    if (FCmpInst::isUnordered(Pred))
+      Mask = ~Mask;
+
+    return exactClass(Src, Mask);
+  }
+
+  return fcmpImpliesClass(Pred, MF, LHS, ConstRHS.classify(), LookThroughSrc);
+}
+
+std::tuple<Register, FPClassTest, FPClassTest>
+GISelValueTracking::fcmpImpliesClass(CmpInst::Predicate Pred,
+                                     const MachineFunction &MF, Register LHS,
+                                     Register RHS, bool LookThroughSrc) {
+  const ConstantFP *ConstRHS;
+  if (!mi_match(RHS, MRI, m_GFCst(ConstRHS)))
+    return {Register(), fcAllFlags, fcAllFlags};
+
+  // TODO: Just call computeKnownFPClass for RHS to handle non-constants.
+  return fcmpImpliesClass(Pred, MF, LHS, ConstRHS->getValueAPF(),
+                          LookThroughSrc);
+}
+
+void GISelValueTracking::computeKnownFPClass(Register R, KnownFPClass &Known,
+                                             FPClassTest InterestedClasses,
+                                             unsigned Depth) {
+  LLT Ty = MRI.getType(R);
+  APInt DemandedElts =
+      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
+  computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth);
+}
+
+void GISelValueTracking::computeKnownFPClassForFPTrunc(
+    const MachineInstr &MI, const APInt &DemandedElts,
+    FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth) {
+  if ((InterestedClasses & (KnownFPClass::OrderedLessThanZeroMask | fcNan)) ==
+      fcNone)
+    return;
+
+  Register Val = MI.getOperand(1).getReg();
+  KnownFPClass KnownSrc;
+  computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                      Depth + 1);
+
+  // Sign should be preserved
+  // TODO: Handle cannot be ordered greater than zero
+  if (KnownSrc.cannotBeOrderedLessThanZero())
+    Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+  Known.propagateNaN(KnownSrc, true);
+
+  // Infinity needs a range check.
+}
+
+void GISelValueTracking::computeKnownFPClass(Register R,
+                                             const APInt &DemandedElts,
+                                             FPClassTest InterestedClasses,
+                                             KnownFPClass &Known,
+                                             unsigned Depth) {
+  assert(Known.isUnknown() && "should not be called with known information");
+
+  if (!DemandedElts) {
+    // No demanded elts, better to assume we don't know anything.
+    Known.resetAll();
+    return;
+  }
+
+  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
+
+  MachineInstr &MI = *MRI.getVRegDef(R);
+  unsigned Opcode = MI.getOpcode();
+  LLT DstTy = MRI.getType(R);
+
+  if (!DstTy.isValid()) {
+    Known.resetAll();
+    return;
+  }
+
+  if (auto Cst = GFConstant::getConstant(R, MRI)) {
+    switch (Cst->getKind()) {
+    case GFConstant::GFConstantKind::Scalar: {
+      auto APF = Cst->getScalarValue();
+      Known.KnownFPClasses = APF.classify();
+      Known.SignBit = APF.isNegative();
+      break;
+    }
+    case GFConstant::GFConstantKind::FixedVector: {
+      Known.KnownFPClasses = fcNone;
+      bool SignBitAllZero = true;
+      bool SignBitAllOne = true;
+
+      for (auto C : *Cst) {
+        Known.KnownFPClasses |= C.classify();
+        if (C.isNegative())
+          SignBitAllZero = false;
+        else
+          SignBitAllOne = false;
+      }
+
+      if (SignBitAllOne != SignBitAllZero)
+        Known.SignBit = SignBitAllOne;
+
+      break;
+    }
+    case GFConstant::GFConstantKind::ScalableVector: {
+      Known.resetAll();
+      break;
+    }
+    }
+
+    return;
+  }
+
+  FPClassTest KnownNotFromFlags = fcNone;
+  if (MI.getFlag(MachineInstr::MIFlag::FmNoNans))
+    KnownNotFromFlags |= fcNan;
+  if (MI.getFlag(MachineInstr::MIFlag::FmNoInfs))
+    KnownNotFromFlags |= fcInf;
+
+  // We no longer need to find out about these bits from inputs if we can
+  // assume this from flags/attributes.
+  InterestedClasses &= ~KnownNotFromFlags;
+
+  auto ClearClassesFromFlags =
+      make_scope_exit([=, &Known] { Known.knownNot(KnownNotFromFlags); });
+
+  // All recursive calls that increase depth must come after this.
+  if (Depth == MaxAnalysisRecursionDepth)
+    return;
+
+  const MachineFunction *MF = MI.getMF();
+
+  switch (Opcode) {
+  default:
+    TL.computeKnownFPClassForTargetInstr(*this, R, Known, DemandedElts, MRI,
+                                         Depth);
+    break;
+  case TargetOpcode::G_FNEG: {
+    Register Val = MI.getOperand(1).getReg();
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known, Depth + 1);
+    Known.fneg();
+    break;
+  }
+  case TargetOpcode::G_SELECT: {
+    GSelect &SelMI = cast<GSelect>(MI);
+    Register Cond = SelMI.getCondReg();
+    Register LHS = SelMI.getTrueReg();
+    Register RHS = SelMI.getFalseReg();
+
+    FPClassTest FilterLHS = fcAllFlags;
+    FPClassTest FilterRHS = fcAllFlags;
+
+    Register TestedValue;
+    FPClassTest MaskIfTrue = fcAllFlags;
+    FPClassTest MaskIfFalse = fcAllFlags;
+    FPClassTest ClassVal = fcNone;
+
+    CmpInst::Predicate Pred;
+    Register CmpLHS, CmpRHS;
+    if (mi_match(Cond, MRI,
+                 m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) {
+      // If the select filters out a value based on the class, it no longer
+      // participates in the class of the result
+
+      // TODO: In some degenerate cases we can infer something if we try again
+      // without looking through sign operations.
+      bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
+      std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
+          fcmpImpliesClass(Pred, *MF, CmpLHS, CmpRHS, LookThroughFAbsFNeg);
+    } else if (mi_match(
+                   Cond, MRI,
+                   m_GIsFPClass(m_Reg(TestedValue), m_FPClassTest(ClassVal)))) {
+      FPClassTest TestedMask = ClassVal;
+      MaskIfTrue = TestedMask;
+      MaskIfFalse = ~TestedMask;
+    }
+
+    if (TestedValue == LHS) {
+      // match !isnan(x) ? x : y
+      FilterLHS = MaskIfTrue;
+    } else if (TestedValue == RHS) { // && IsExactClass
+      // match !isnan(x) ? y : x
+      FilterRHS = MaskIfFalse;
+    }
+
+    KnownFPClass Known2;
+    computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
+                        Depth + 1);
+    Known.KnownFPClasses &= FilterLHS;
+
+    computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
+                        Known2, Depth + 1);
+    Known2.KnownFPClasses &= FilterRHS;
+
+    Known |= Known2;
+    break;
+  }
+  case TargetOpcode::G_FCOPYSIGN: {
+    Register Magnitude = MI.getOperand(1).getReg();
+    Register Sign = MI.getOperand(2).getReg();
+
+    KnownFPClass KnownSign;
+
+    computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
+                        Depth + 1);
+    computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
+                        Depth + 1);
+    Known.copysign(KnownSign);
+    break;
+  }
+  case TargetOpcode::G_FMA:
+  case TargetOpcode::G_STRICT_FMA:
+  case TargetOpcode::G_FMAD: {
+    if ((InterestedClasses & fcNegative) == fcNone)
+      break;
+
+    Register A = MI.getOperand(1).getReg();
+    Register B = MI.getOperand(2).getReg();
+    Register C = MI.getOperand(3).getReg();
+
+    if (A != B)
+      break;
+
+    // The multiply cannot be -0 and therefore the add can't be -0
+    Known.knownNot(fcNegZero);
+
+    // x * x + y is non-negative if y is non-negative.
+    KnownFPClass KnownAddend;
+    computeKnownFPClass(C, DemandedElts, InterestedClasses, KnownAddend,
+                        Depth + 1);
+
+    if (KnownAddend.cannotBeOrderedLessThanZero())
+      Known.knownNot(fcNegative);
+    break;
+  }
+  case TargetOpcode::G_FSQRT:
+  case TargetOpcode::G_STRICT_FSQRT: {
+    KnownFPClass KnownSrc;
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if (InterestedClasses & fcNan)
+      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
+
+    Register Val = MI.getOperand(1).getReg();
+
+    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
+
+    if (KnownSrc.isKnownNeverPosInfinity())
+      Known.knownNot(fcPosInf);
+    if (KnownSrc.isKnownNever(fcSNan))
+      Known.knownNot(fcSNan);
+
+    // Any negative value besides -0 returns a nan.
+    if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
+      Known.knownNot(fcNan);
+
+    // The only negative value that can be returned is -0 for -0 inputs.
+    Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal);
+    break;
+  }
+  case TargetOpcode::G_FABS: {
+    if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
+      Register Val = MI.getOperand(1).getReg();
+      // If we only care about the sign bit we don't need to inspect the
+      // operand.
+      computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
+                          Depth + 1);
+    }
+    Known.fabs();
+    break;
+  }
+  case TargetOpcode::G_FSIN:
+  case TargetOpcode::G_FCOS:
+  case TargetOpcode::G_FSINCOS: {
+    // Return NaN on infinite inputs.
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+    Known.knownNot(fcInf);
+
+    if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
+      Known.knownNot(fcNan);
+    break;
+  }
+  case TargetOpcode::G_FMAXNUM:
+  case TargetOpcode::G_FMINNUM:
+  case TargetOpcode::G_FMINNUM_IEEE:
+  case TargetOpcode::G_FMAXIMUM:
+  case TargetOpcode::G_FMINIMUM:
+  case TargetOpcode::G_FMAXNUM_IEEE: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    KnownFPClass KnownLHS, KnownRHS;
+
+    computeKnownFPClass(LHS, DemandedElts, InterestedClasses, KnownLHS,
+                        Depth + 1);
+    computeKnownFPClass(RHS, DemandedElts, InterestedClasses, KnownRHS,
+                        Depth + 1);
+
+    bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
+    Known = KnownLHS | KnownRHS;
+
+    // If either operand is not NaN, the result is not NaN.
+    if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
+                     Opcode == TargetOpcode::G_FMAXNUM))
+      Known.knownNot(fcNan);
+
+    if (Opcode == TargetOpcode::G_FMAXNUM) {
+      // If at least one operand is known to be positive, the result must be
+      // positive.
+      if ((KnownLHS.cannotBeOrderedLessThanZero() &&
+           KnownLHS.isKnownNeverNaN()) ||
+          (KnownRHS.cannotBeOrderedLessThanZero() &&
+           KnownRHS.isKnownNeverNaN()))
+        Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+    } else if (Opcode == TargetOpcode::G_FMAXIMUM) {
+      // If at least one operand is known to be positive, the result must be
+      // positive.
+      if (KnownLHS.cannotBeOrderedLessThanZero() ||
+          KnownRHS.cannotBeOrderedLessThanZero())
+        Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+    } else if (Opcode == TargetOpcode::G_FMINNUM) {
+      // If at least one operand is known to be negative, the result must be
+      // negative.
+      if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
+           KnownLHS.isKnownNeverNaN()) ||
+          (KnownRHS.cannotBeOrderedGreaterThanZero() &&
+           KnownRHS.isKnownNeverNaN()))
+        Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+    } else if (Opcode == TargetOpcode::G_FMINNUM_IEEE) {
+      // TODO:
+    } else if (Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
+      // TODO:
+    } else {
+      // If at least one operand is known to be negative, the result must be
+      // negative.
+      if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
+          KnownRHS.cannotBeOrderedGreaterThanZero())
+        Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+    }
+
+    // Fixup zero handling if denormals could be returned as a zero.
+    //
+    // As there's no spec for denormal flushing, be conservative with the
+    // treatment of denormals that could be flushed to zero. For older
+    // subtargets on AMDGPU the min/max instructions would not flush the
+    // output and return the original value.
+    //
+    if ((Known.KnownFPClasses & fcZero) != fcNone &&
+        !Known.isKnownNeverSubnormal()) {
+      DenormalMode Mode = MF->getDenormalMode(getFltSemanticForLLT(DstTy));
+      if (Mode != DenormalMode::getIEEE())
+        Known.KnownFPClasses |= fcZero;
+    }
+
+    if (Known.isKnownNeverNaN()) {
+      if (KnownLHS.SignBit && KnownRHS.SignBit &&
+          *KnownLHS.SignBit == *KnownRHS.SignBit) {
+        if (*KnownLHS.SignBit)
+          Known.signBitMustBeOne();
+        else
+          Known.signBitMustBeZero();
+      } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
+                  Opcode == TargetOpcode::G_FMINIMUM) ||
+                 ((KnownLHS.isKnownNeverNegZero() ||
+                   KnownRHS.isKnownNeverPosZero()) &&
+                  (KnownLHS.isKnownNeverPosZero() ||
+                   KnownRHS.isKnownNeverNegZero()))) {
+        if ((Opcode == TargetOpcode::G_FMAXIMUM ||
+             Opcode == TargetOpcode::G_FMAXNUM) &&
+            (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
+          Known.signBitMustBeZero();
+        else if ((Opcode == TargetOpcode::G_FMINIMUM ||
+                  Opcode == TargetOpcode::G_FMINNUM) &&
+                 (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
+          Known.signBitMustBeOne();
+      }
+    }
+    break;
+  }
+  case TargetOpcode::G_FCANONICALIZE: {
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+
+    // This is essentially a stronger form of
+    // propagateCanonicalizingSrc. Other "canonicalizing" operations don't
+    // actually have an IR canonicalization guarantee.
+
+    // Canonicalize may flush denormals to zero, so we have to consider the
+    // denormal mode to preserve known-not-0 knowledge.
+    Known.KnownFPClasses = KnownSrc.KnownFPClasses | fcZero | fcQNan;
+
+    // Stronger version of propagateNaN
+    // Canonicalize is guaranteed to quiet signaling nans.
+    if (KnownSrc.isKnownNeverNaN())
+      Known.knownNot(fcNan);
+    else
+      Known.knownNot(fcSNan);
+
+    // If the parent function flushes denormals, the canonical output cannot
+    // be a denormal.
+    LLT Ty = MRI.getType(Val);
+    const fltSemantics &FPType = getFltSemanticForLLT(Ty.getScalarType());
+    DenormalMode DenormMode = MF->getDenormalMode(FPType);
+    if (DenormMode == DenormalMode::getIEEE()) {
+      if (KnownSrc.isKnownNever(fcPosZero))
+        Known.knownNot(fcPosZero);
+      if (KnownSrc.isKnownNever(fcNegZero))
+        Known.knownNot(fcNegZero);
+      break;
+    }
+
+    if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero())
+      Known.knownNot(fcSubnormal);
+
+    if (DenormMode.Input == DenormalMode::PositiveZero ||
+        (DenormMode.Output == DenormalMode::PositiveZero &&
+         DenormMode.Input == DenormalMode::IEEE))
+      Known.knownNot(fcNegZero);
+
+    break;
+  }
+  case TargetOpcode::G_VECREDUCE_FMAX:
+  case TargetOpcode::G_VECREDUCE_FMIN:
+  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
+  case TargetOpcode::G_VECREDUCE_FMINIMUM: {
+    Register Val = MI.getOperand(1).getReg();
+    // reduce min/max will choose one of the vector elements, so we can infer
+    // any class information that is common to all elements.
+
+    Known =
+        computeKnownFPClass(Val, MI.getFlags(), InterestedClasses, Depth + 1);
+    // Can only propagate sign if output is never NaN.
+    if (!Known.isKnownNeverNaN())
+      Known.SignBit.reset();
+    break;
+  }
+  case TargetOpcode::G_INTRINSIC_TRUNC:
+  case TargetOpcode::G_FFLOOR:
+  case TargetOpcode::G_FCEIL:
+  case TargetOpcode::G_FRINT:
+  case TargetOpcode::G_FNEARBYINT:
+  case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
+  case TargetOpcode::G_INTRINSIC_ROUND: {
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if (InterestedSrcs & fcPosFinite)
+      InterestedSrcs |= fcPosFinite;
+    if (InterestedSrcs & fcNegFinite)
+      InterestedSrcs |= fcNegFinite;
+    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
+
+    // Integer results cannot be subnormal.
+    Known.knownNot(fcSubnormal);
+
+    Known.propagateNaN(KnownSrc, true);
+
+    // TODO: handle multi unit FPTypes once LLT FPInfo lands
+
+    // Negative round ups to 0 produce -0
+    if (KnownSrc.isKnownNever(fcPosFinite))
+      Known.knownNot(fcPosFinite);
+    if (KnownSrc.isKnownNever(fcNegFinite))
+      Known.knownNot(fcNegFinite);
+
+    break;
+  }
+  case TargetOpcode::G_FEXP:
+  case TargetOpcode::G_FEXP2:
+  case TargetOpcode::G_FEXP10: {
+    Known.knownNot(fcNegative);
+    if ((InterestedClasses & fcNan) == fcNone)
+      break;
+
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+    if (KnownSrc.isKnownNeverNaN()) {
+      Known.knownNot(fcNan);
+      Known.signBitMustBeZero();
+    }
+
+    break;
+  }
+  case TargetOpcode::G_FLOG:
+  case TargetOpcode::G_FLOG2:
+  case TargetOpcode::G_FLOG10: {
+    // log(+inf) -> +inf
+    // log([+-]0.0) -> -inf
+    // log(-inf) -> nan
+    // log(-x) -> nan
+    if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
+      break;
+
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if ((InterestedClasses & fcNegInf) != fcNone)
+      InterestedSrcs |= fcZero | fcSubnormal;
+    if ((InterestedClasses & fcNan) != fcNone)
+      InterestedSrcs |= fcNan | (fcNegative & ~fcNan);
+
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);
+
+    if (KnownSrc.isKnownNeverPosInfinity())
+      Known.knownNot(fcPosInf);
+
+    if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
+      Known.knownNot(fcNan);
+
+    LLT Ty = MRI.getType(Val);
+    const fltSemantics &FltSem = getFltSemanticForLLT(Ty.getScalarType());
+    DenormalMode Mode = MF->getDenormalMode(FltSem);
+
+    if (KnownSrc.isKnownNeverLogicalZero(Mode))
+      Known.knownNot(fcNegInf);
+
+    break;
+  }
+  case TargetOpcode::G_FPOWI: {
+    if ((InterestedClasses & fcNegative) == fcNone)
+      break;
+
+    Register Exp = MI.getOperand(2).getReg();
+    LLT ExpTy = MRI.getType(Exp);
+    KnownBits ExponentKnownBits = getKnownBits(
+        Exp, ExpTy.isVector() ? DemandedElts : APInt(1, 1), Depth + 1);
+
+    if (ExponentKnownBits.Zero[0]) { // Is even
+      Known.knownNot(fcNegative);
+      break;
+    }
+
+    // Given that exp is an integer, here are the
+    // ways that pow can return a negative value:
+    //
+    //   pow(-x, exp)   --> negative if exp is odd and x is negative.
+    //   pow(-0, exp)   --> -inf if exp is negative odd.
+    //   pow(-0, exp)   --> -0 if exp is positive odd.
+    //   pow(-inf, exp) --> -0 if exp is negative odd.
+    //   pow(-inf, exp) --> -inf if exp is positive odd.
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, fcNegative, KnownSrc, Depth + 1);
+    if (KnownSrc.isKnownNever(fcNegative))
+      Known.knownNot(fcNegative);
+    break;
+  }
+  case TargetOpcode::G_FLDEXP:
+  case TargetOpcode::G_STRICT_FLDEXP: {
+    Register Val = MI.getOperand(1).getReg();
+    KnownFPClass KnownSrc;
+    computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
+                        Depth + 1);
+    Known.propagateNaN(KnownSrc, /*PropagateSign=*/true);
+
+    // Sign is preserved, but underflows may produce zeroes.
+    if (KnownSrc.isKnownNever(fcNegative))
+      Known.knownNot(fcNegative);
+    else if (KnownSrc.cannotBeOrderedLessThanZero())
+      Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+    if (KnownSrc.isKnownNever(fcPositive))
+      Known.knownNot(fcPositive);
+    else if (KnownSrc.cannotBeOrderedGreaterThanZero())
+      Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+
+    // Can refine inf/zero handling based on the exponent operand.
+    const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
+    if ((InterestedClasses & ExpInfoMask) == fcNone)
+      break;
+    if ((KnownSrc.KnownFPClasses & ExpInfoMask) == fcNone)
+      break;
+
+    // TODO: Handle constant range of Exp
+
+    break;
+  }
+  case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
+    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
+                                  Depth);
+    break;
+  }
+  case TargetOpcode::G_FADD:
+  case TargetOpcode::G_STRICT_FADD:
+  case TargetOpcode::G_FSUB:
+  case TargetOpcode::G_STRICT_FSUB: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    KnownFPClass KnownLHS, KnownRHS;
+    bool WantNegative =
+        (Opcode == TargetOpcode::G_FADD ||
+         Opcode == TargetOpcode::G_STRICT_FADD) &&
+        (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone;
+    bool WantNaN = (InterestedClasses & fcNan) != fcNone;
+    bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;
+
+    if (!WantNaN && !WantNegative && !WantNegZero)
+      break;
+
+    FPClassTest InterestedSrcs = InterestedClasses;
+    if (WantNegative)
+      InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
+    if (InterestedClasses & fcNan)
+      InterestedSrcs |= fcInf;
+    computeKnownFPClass(RHS, DemandedElts, InterestedSrcs, KnownRHS, Depth + 1);
+
+    if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
+        (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) ||
+        WantNegZero ||
+        (Opcode == TargetOpcode::G_FSUB ||
+         Opcode == TargetOpcode::G_STRICT_FSUB)) {
+
+      // RHS is canonically cheaper to compute. Skip inspecting the LHS if
+      // there's no point.
+      computeKnownFPClass(LHS, DemandedElts, InterestedSrcs, KnownLHS,
+                          Depth + 1);
+      // Adding positive and negative infinity produces NaN.
+      // TODO: Check sign of infinities.
+      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
+          (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
+        Known.knownNot(fcNan);
+
+      if (Opcode == TargetOpcode::G_FADD ||
+          Opcode == TargetOpcode::G_STRICT_FADD) {
+        if (KnownLHS.cannotBeOrderedLessThanZero() &&
+            KnownRHS.cannotBeOrderedLessThanZero())
+          Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+
+        // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
+        if ((KnownLHS.isKnownNeverLogicalNegZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy))) ||
+             KnownRHS.isKnownNeverLogicalNegZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) &&
+            // Make sure output negative denormal can't flush to -0
+            outputDenormalIsIEEEOrPosZero(*MF, DstTy))
+          Known.knownNot(fcNegZero);
+      } else {
+        // Only fsub -0, +0 can return -0
+        if ((KnownLHS.isKnownNeverLogicalNegZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy))) ||
+             KnownRHS.isKnownNeverLogicalPosZero(
+                 MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) &&
+            // Make sure output negative denormal can't flush to -0
+            outputDenormalIsIEEEOrPosZero(*MF, DstTy))
+          Known.knownNot(fcNegZero);
+      }
+    }
+
+    break;
+  }
+  case TargetOpcode::G_FMUL:
+  case TargetOpcode::G_STRICT_FMUL: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    // X * X is always non-negative or a NaN.
+    if (LHS == RHS)
+      Known.knownNot(fcNegative);
+
+    if ((InterestedClasses & fcNan) != fcNan)
+      break;
+
+    // fcSubnormal is only needed in case of DAZ.
+    const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;
+
+    KnownFPClass KnownLHS, KnownRHS;
+    computeKnownFPClass(RHS, DemandedElts, NeedForNan, KnownRHS, Depth + 1);
+    if (!KnownRHS.isKnownNeverNaN())
+      break;
+
+    computeKnownFPClass(LHS, DemandedElts, NeedForNan, KnownLHS, Depth + 1);
+    if (!KnownLHS.isKnownNeverNaN())
+      break;
+
+    if (KnownLHS.SignBit && KnownRHS.SignBit) {
+      if (*KnownLHS.SignBit == *KnownRHS.SignBit)
+        Known.signBitMustBeZero();
+      else
+        Known.signBitMustBeOne();
+    }
+
+    // 0 * +/-inf produces NaN. If neither operand can be an infinity, the
+    // result cannot be NaN.
+    if (KnownLHS.isKnownNeverInfinity() && KnownRHS.isKnownNeverInfinity()) {
+      Known.knownNot(fcNan);
+      break;
+    }
+
+    if ((KnownRHS.isKnownNeverInfinity() ||
+         KnownLHS.isKnownNeverLogicalZero(
+             MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) &&
+        (KnownLHS.isKnownNeverInfinity() ||
+         KnownRHS.isKnownNeverLogicalZero(
+             MF->getDenormalMode(getFltSemanticForLLT(DstTy)))))
+      Known.knownNot(fcNan);
+
+    break;
+  }
+  case TargetOpcode::G_FDIV:
+  case TargetOpcode::G_FREM: {
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+
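+    // e.g. x / x == 1.0 for any finite nonzero x, while 0.0 / 0.0 and
+    // inf / inf are NaN; likewise x % x is a signed zero or a NaN.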
+    if (LHS == RHS) {
+      // TODO: Could filter out snan if we inspect the operand
+      if (Opcode == TargetOpcode::G_FDIV) {
+        // X / X is always exactly 1.0 or a NaN.
+        Known.KnownFPClasses = fcNan | fcPosNormal;
+      } else {
+        // X % X is always exactly [+-]0.0 or a NaN.
+        Known.KnownFPClasses = fcNan | fcZero;
+      }
+
+      break;
+    }
+
+    const bool WantNan = (InterestedClasses & fcNan) != fcNone;
+    const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
+    const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
+                              (InterestedClasses & fcPositive) != fcNone;
+    if (!WantNan && !WantNegative && !WantPositive)
+      break;
+
+    KnownFPClass KnownLHS, KnownRHS;
+
+    computeKnownFPClass(RHS, DemandedElts, fcNan | fcInf | fcZero | fcNegative,
+                        KnownRHS, Depth + 1);
+
+    bool KnowSomethingUseful =
+        KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
+
+    if (KnowSomethingUseful || WantPositive) {
+      const FPClassTest InterestedLHS =
+          WantPositive ? fcAllFlags
+                       : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;
+
+      computeKnownFPClass(LHS, DemandedElts, InterestedClasses & InterestedLHS,
+                          KnownLHS, Depth + 1);
+    }
+
+    if (Opcode == TargetOpcode::G_FDIV) {
+      // Only 0/0, Inf/Inf produce NaN.
+      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
+          (KnownLHS.isKnownNeverInfinity() ||
+           KnownRHS.isKnownNeverInfinity()) &&
+          ((KnownLHS.isKnownNeverLogicalZero(
+               MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) ||
+           (KnownRHS.isKnownNeverLogicalZero(
+               MF->getDenormalMode(getFltSemanticForLLT(DstTy)))))) {
+        Known.knownNot(fcNan);
+      }
+
+      // X / -0.0 is -Inf (or NaN).
+      // A non-negative value divided by a non-negative value stays
+      // non-negative.
+      if (KnownLHS.isKnownNever(fcNegative) &&
+          KnownRHS.isKnownNever(fcNegative))
+        Known.knownNot(fcNegative);
+    } else {
+      // Inf REM x and x REM 0 produce NaN.
+      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
+          KnownLHS.isKnownNeverInfinity() &&
+          KnownRHS.isKnownNeverLogicalZero(
+              MF->getDenormalMode(getFltSemanticForLLT(DstTy)))) {
+        Known.knownNot(fcNan);
+      }
+
+      // The sign for frem is the same as the first operand.
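+      // e.g. fmod(-5.5, 2.0) == -1.5 and fmod(5.5, -2.0) == 1.5.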
+      if (KnownLHS.cannotBeOrderedLessThanZero())
+        Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
+      if (KnownLHS.cannotBeOrderedGreaterThanZero())
+        Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
+
+      // See if we can be more aggressive about the sign of 0.
+      if (KnownLHS.isKnownNever(fcNegative))
+        Known.knownNot(fcNegative);
+      if (KnownLHS.isKnownNever(fcPositive))
+        Known.knownNot(fcPositive);
+    }
+
+    break;
+  }
+  case TargetOpcode::G_FPEXT: {
+    Register Dst = MI.getOperand(0).getReg();
+    Register Src = MI.getOperand(1).getReg();
+    // Infinity, nan and zero propagate from source.
+    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
+                        Depth + 1);
+
+    LLT DstTy = MRI.getType(Dst);
+    const fltSemantics &DstSem = getFltSemanticForLLT(DstTy.getScalarType());
+    LLT SrcTy = MRI.getType(Src);
+    const fltSemantics &SrcSem = getFltSemanticForLLT(SrcTy.getScalarType());
+
+    // All subnormal inputs should be in the normal range in the result type.
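+    // e.g. every IEEE half subnormal widens to a normal value in float.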
+    if (APFloat::isRepresentableAsNormalIn(SrcSem, DstSem)) {
+      if (Known.KnownFPClasses & fcPosSubnormal)
+        Known.KnownFPClasses |= fcPosNormal;
+      if (Known.KnownFPClasses & fcNegSubnormal)
+        Known.KnownFPClasses |= fcNegNormal;
+      Known.knownNot(fcSubnormal);
+    }
+
+    // Sign bit of a nan isn't guaranteed.
+    if (!Known.isKnownNeverNaN())
+      Known.SignBit = std::nullopt;
+    break;
+  }
+  case TargetOpcode::G_FPTRUNC: {
+    computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,
+                                  Depth);
+    break;
+  }
+  case TargetOpcode::G_SITOFP:
+  case TargetOpcode::G_UITOFP: {
+    // Cannot produce a NaN.
+    Known.knownNot(fcNan);
+
+    // Integers cannot be subnormal.
+    Known.knownNot(fcSubnormal);
+
+    // sitofp and uitofp turn into +0.0 for zero.
+    Known.knownNot(fcNegZero);
+    if (Opcode == TargetOpcode::G_UITOFP)
+      Known.signBitMustBeZero();
+
+    Register Val = MI.getOperand(1).getReg();
+    LLT Ty = MRI.getType(Val);
+
+    if (InterestedClasses & fcInf) {
+      // Get width of largest magnitude integer (remove a bit if signed).
+      // This still works for a signed minimum value because the largest FP
+      // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
+      int IntSize = Ty.getScalarSizeInBits();
+      if (Opcode == TargetOpcode::G_SITOFP)
+        --IntSize;
+
+      // If the exponent of the largest finite FP value can hold the largest
+      // integer, the result of the cast must be finite.
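+      // e.g. any u32 -> float is finite (ilogb(FLT_MAX) == 127 >= 32),
+      // while u128 -> float can round up to +inf (128 > 127).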
+      LLT FPTy = DstTy.getScalarType();
+      const fltSemantics &FltSem = getFltSemanticForLLT(FPTy);
+      if (ilogb(APFloat::getLargest(FltSem)) >= IntSize)
+        Known.knownNot(fcInf);
+    }
+
+    break;
+  }
+  // TODO: Handle TargetOpcode::G_MERGE_VALUES.
+  case TargetOpcode::G_BUILD_VECTOR:
+  case TargetOpcode::G_CONCAT_VECTORS: {
+    GMergeLikeInstr &Merge = cast<GMergeLikeInstr>(MI);
+
+    if (!DstTy.isFixedVector())
+      break;
+
+    bool First = true;
+    for (unsigned Idx = 0; Idx < Merge.getNumSources(); ++Idx) {
+      // Only sources whose lane is demanded contribute to the result.
+      bool NeedsElt = DemandedElts[Idx];
+
+      // Do we demand this source element?
+      if (NeedsElt) {
+        Register Src = Merge.getSourceReg(Idx);
+        if (First) {
+          computeKnownFPClass(Src, Known, InterestedClasses, Depth + 1);
+          First = false;
+        } else {
+          KnownFPClass Known2;
+          computeKnownFPClass(Src, Known2, InterestedClasses, Depth + 1);
+          Known |= Known2;
+        }
+
+        // If we don't know any bits, early out.
+        if (Known.isUnknown())
+          break;
+      }
+    }
+
+    break;
+  }
+  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
+    // Look through extract element. If the index is non-constant or
+    // out-of-range demand all elements, otherwise just the extracted
+    // element.
+    GExtractVectorElement &Extract = cast<GExtractVectorElement>(MI);
+    Register Vec = Extract.getVectorReg();
+    Register Idx = Extract.getIndexReg();
+
+    auto CIdx = getIConstantVRegVal(Idx, MRI);
+
+    LLT VecTy = MRI.getType(Vec);
+
+    if (VecTy.isFixedVector()) {
+      unsigned NumElts = VecTy.getNumElements();
+      APInt DemandedVecElts = APInt::getAllOnes(NumElts);
+      if (CIdx && CIdx->ult(NumElts))
+        DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
+      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
+                          Depth + 1);
+    }
+
+    break;
+  }
+  case TargetOpcode::G_INSERT_VECTOR_ELT: {
+    GInsertVectorElement &Insert = cast<GInsertVectorElement>(MI);
+    Register Vec = Insert.getVectorReg();
+    Register Elt = Insert.getElementReg();
+    Register Idx = Insert.getIndexReg();
+
+    LLT VecTy = MRI.getType(Vec);
+
+    if (VecTy.isScalableVector())
+      break;
+
+    auto CIdx = getIConstantVRegVal(Idx, MRI);
+
+    unsigned NumElts = DemandedElts.getBitWidth();
+    APInt DemandedVecElts = DemandedElts;
+    bool NeedsElt = true;
+    // If we know the index we are inserting to, clear it from Vec check.
+    if (CIdx && CIdx->ult(NumElts)) {
+      DemandedVecElts.clearBit(CIdx->getZExtValue());
+      NeedsElt = DemandedElts[CIdx->getZExtValue()];
+    }
+
+    // Do we demand the inserted element?
+    if (NeedsElt) {
+      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1);
+      // If we don't know any bits, early out.
+      if (Known.isUnknown())
+        break;
+    } else {
+      Known.KnownFPClasses = fcNone;
+    }
+
+    // Do we need any more elements from Vec?
+    if (!DemandedVecElts.isZero()) {
+      KnownFPClass Known2;
+      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
+                          Depth + 1);
+      Known |= Known2;
+    }
+
+    break;
+  }
+  case TargetOpcode::G_SHUFFLE_VECTOR: {
+    // For undef elements, we don't know anything about the common state of
+    // the shuffle result.
+    GShuffleVector &Shuf = cast<GShuffleVector>(MI);
+    APInt DemandedLHS, DemandedRHS;
+    if (DstTy.isScalableVector()) {
+      assert(DemandedElts == APInt(1, 1));
+      DemandedLHS = DemandedRHS = DemandedElts;
+    } else {
+      if (!llvm::getShuffleDemandedElts(DstTy.getNumElements(), Shuf.getMask(),
+                                        DemandedElts, DemandedLHS,
+                                        DemandedRHS)) {
+        Known.resetAll();
+        break;
+      }
+    }
+
+    if (!!DemandedLHS) {
+      Register LHS = Shuf.getSrc1Reg();
+      computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
+                          Depth + 1);
+
+      // If we don't know any bits, early out.
+      if (Known.isUnknown())
+        break;
+    } else {
+      Known.KnownFPClasses = fcNone;
+    }
+
+    if (!!DemandedRHS) {
+      KnownFPClass Known2;
+      Register RHS = Shuf.getSrc2Reg();
+      computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
+                          Depth + 1);
+      Known |= Known2;
+    }
+    break;
+  }
+  case TargetOpcode::COPY: {
+    Register Src = MI.getOperand(1).getReg();
+    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1);
+    break;
+  }
+  }
+}
+
+KnownFPClass
+GISelValueTracking::computeKnownFPClass(Register R, const APInt &DemandedElts,
+                                        FPClassTest InterestedClasses,
+                                        unsigned Depth) {
+  KnownFPClass KnownClasses;
+  computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, Depth);
+  return KnownClasses;
+}
+
+KnownFPClass GISelValueTracking::computeKnownFPClass(
+    Register R, FPClassTest InterestedClasses, unsigned Depth) {
+  KnownFPClass Known;
+  computeKnownFPClass(R, Known, InterestedClasses, Depth);
+  return Known;
+}
+
+KnownFPClass GISelValueTracking::computeKnownFPClass(
+    Register R, const APInt &DemandedElts, uint32_t Flags,
+    FPClassTest InterestedClasses, unsigned Depth) {
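+  // nnan/ninf make a NaN/Inf result poison, so it is valid both to skip
+  // computing those classes and to clear them from the final result.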
+  if (Flags & MachineInstr::MIFlag::FmNoNans)
+    InterestedClasses &= ~fcNan;
+  if (Flags & MachineInstr::MIFlag::FmNoInfs)
+    InterestedClasses &= ~fcInf;
+
+  KnownFPClass Result =
+      computeKnownFPClass(R, DemandedElts, InterestedClasses, Depth);
+
+  if (Flags & MachineInstr::MIFlag::FmNoNans)
+    Result.KnownFPClasses &= ~fcNan;
+  if (Flags & MachineInstr::MIFlag::FmNoInfs)
+    Result.KnownFPClasses &= ~fcInf;
+  return Result;
+}
+
+KnownFPClass GISelValueTracking::computeKnownFPClass(
+    Register R, uint32_t Flags, FPClassTest InterestedClasses, unsigned Depth) {
+  LLT Ty = MRI.getType(R);
+  APInt DemandedElts =
+      Ty.isFixedVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
+  return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, Depth);
+}
+
 /// Compute number of sign bits for the intersection of \p Src0 and \p Src1
 unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0, Register Src1,
                                                    const APInt &DemandedElts,
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 223d69c362185..d37b4821da407 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -318,6 +318,14 @@ llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
   return std::nullopt;
 }
 
+std::optional<uint64_t>
+llvm::getIConstantVRegZExtVal(Register VReg, const MachineRegisterInfo &MRI) {
+  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
+  if (Val && Val->getBitWidth() <= 64)
+    return Val->getZExtValue();
+  return std::nullopt;
+}
+
 namespace {
 
 // This function is used in many places, and as such, it has some
@@ -1429,6 +1437,21 @@ llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
 }
 
+std::optional<uint64_t>
+llvm::getIConstantSplatZExtVal(const Register Reg,
+                               const MachineRegisterInfo &MRI) {
+  if (auto SplatValAndReg =
+          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
+    return getIConstantVRegZExtVal(SplatValAndReg->VReg, MRI);
+  return std::nullopt;
+}
+
+std::optional<uint64_t>
+llvm::getIConstantSplatZExtVal(const MachineInstr &MI,
+                               const MachineRegisterInfo &MRI) {
+  return getIConstantSplatZExtVal(MI.getOperand(0).getReg(), MRI);
+}
+
 std::optional<FPValueAndVReg>
 llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
                         bool AllowUndef) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 0f38bbd46cbca..1c12359403766 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3798,6 +3798,13 @@ void TargetLowering::computeKnownBitsForTargetInstr(
   Known.resetAll();
 }
 
+void TargetLowering::computeKnownFPClassForTargetInstr(
+    GISelValueTracking &Analysis, Register R, KnownFPClass &Known,
+    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
+    unsigned Depth) const {
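+  // Conservative default: assume nothing about target-specific instructions.
+  // Targets refine this by overriding the hook for their own opcodes.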
+  Known.resetAll();
+}
+
 void TargetLowering::computeKnownBitsForFrameIndex(
   const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const {
   // The low bits are known zero if the pointer is aligned.

>From 05278bce3116a6e5d6a1abd4e32122dec56ad78d Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Thu, 3 Apr 2025 12:48:27 +0000
Subject: [PATCH 2/6] add tests

---
 .../CodeGen/GlobalISel/CMakeLists.txt         |    1 +
 .../CodeGen/GlobalISel/KnownFPClassTest.cpp   | 1062 +++++++++++++++++
 2 files changed, 1063 insertions(+)
 create mode 100644 llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp

diff --git a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
index bce91c1ed6173..4ef6aff943f73 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
@@ -24,6 +24,7 @@ add_llvm_unittest(GlobalISelTests
   GISelMITest.cpp
   PatternMatchTest.cpp
   KnownBitsTest.cpp
+  KnownFPClassTest.cpp
   KnownBitsVectorTest.cpp
   GISelUtilsTest.cpp
   GISelAliasTest.cpp
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp
new file mode 100644
index 0000000000000..1a4cc1fa33e23
--- /dev/null
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownFPClassTest.cpp
@@ -0,0 +1,1062 @@
+//===- KnownFPClassTest.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GISelMITest.h"
+#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "gtest/gtest.h"
+#include <optional>
+
+TEST_F(AArch64GISelMITest, TestFPClassCstPosZero) {
+  StringRef MIRString = "  %3:_(s32) = G_FCONSTANT float 0.0\n"
+                        "  %4:_(s32) = COPY %3\n";
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosZero, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCstNegZero) {
+  StringRef MIRString = "  %3:_(s32) = G_FCONSTANT float -0.0\n"
+                        "  %4:_(s32) = COPY %3\n";
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegZero, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassUndef) {
+  StringRef MIRString = R"(
+    %def:_(s32) = G_IMPLICIT_DEF
+    %copy_def:_(s32) = COPY %def
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcAllFlags, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCstVecNegZero) {
+  StringRef MIRString = R"(
+   %c0:_(s32) = G_FCONSTANT float -0.0
+   %c1:_(s32) = G_FCONSTANT float -0.0
+   %c2:_(s32) = G_FCONSTANT float -0.0
+   %vector:_(<3 x s32>) = G_BUILD_VECTOR %c0, %c1, %c2
+   %copy_vector:_(<3 x s32>) = COPY %vector
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegZero, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPos0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0.0
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosZero, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNeg0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float -0.0
+    %rhs:_(s32) = G_FCONSTANT float -0.0
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegZero, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPosOrNeg0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float -0.0
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcZero, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPosInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0x7FF0000000000000
+    %rhs:_(s32) = G_FCONSTANT float 0x7FF0000000000000
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosInf, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNegInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0xFFF0000000000000
+    %rhs:_(s32) = G_FCONSTANT float 0xFFF0000000000000
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectPosOrNegInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_FCONSTANT float 0x7FF0000000000000
+    %rhs:_(s32) = G_FCONSTANT float 0xFFF0000000000000
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNNaN) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sel:_(s32) = nnan G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sel:_(s32) = ninf G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectNNaNNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %cond:_(s1) = G_LOAD %ptr(p0) :: (load (s1))
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sel:_(s32) = nnan ninf G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~(fcNan | fcInf), Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFNegNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fneg:_(s32) = ninf G_FNEG %val
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFabsUnknown) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %copy_fabs:_(s32) = COPY %fabs
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassVecFabsUnknown) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(<3 x s32>) = G_LOAD %ptr(p0) :: (load (<3 x s32>))
+    %fabs:_(<3 x s32>) = G_FABS %val
+    %copy_fabs:_(<3 x s32>) = COPY %fabs
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFnegFabs) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %fneg:_(s32) = G_FNEG %fabs
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegative | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFnegFabsNInf) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = ninf G_FABS %val
+    %fneg:_(s32) = G_FNEG %fabs
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ((fcNegative & ~fcNegInf) | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFnegFabsNNan) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan G_FABS %val
+    %fneg:_(s32) = G_FNEG %fabs
+    %copy_fneg:_(s32) = COPY %fneg
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegative, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopySignNNanSrc0) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %mag:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sgn:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan G_FABS %mag
+    %fcopysign:_(s32) = G_FCOPYSIGN %fabs, %sgn
+    %copy_fcopysign:_(s32) = COPY %fcopysign
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(~fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopySignNInfSrc0_NegSign) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %mag:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sgn:_(s32) = G_FCONSTANT float -1.0
+    %flog:_(s32) = ninf G_FLOG %mag
+    %fcopysign:_(s32) = G_FCOPYSIGN %flog, %sgn
+    %copy_fcopysign:_(s32) = COPY %fcopysign
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcNegFinite | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(true, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopySignNInfSrc0_PosSign) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %mag:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %sgn:_(s32) = G_FCONSTANT float 1.0
+    %fsqrt:_(s32) = ninf G_FSQRT %mag
+    %fcopysign:_(s32) = G_FCOPYSIGN %fsqrt, %sgn
+    %copy_fcopysign:_(s32) = COPY %fcopysign
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassUIToFP) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %cast:_(s32) = G_UITOFP %val
+    %copy_cast:_(s32) = COPY %cast
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite & ~fcSubnormal, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSIToFP) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %cast:_(s32) = G_SITOFP %val
+    %copy_cast:_(s32) = COPY %cast
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcFinite & ~fcNegZero & ~fcSubnormal, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFAdd) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fadd:_(s32) = G_FADD %lhs, %rhs
+    %copy_fadd:_(s32) = COPY %fadd
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcAllFlags, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFMul) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fmul:_(s32) = G_FMUL %val, %val
+    %copy_fmul:_(s32) = COPY %fmul
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFMulZero) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %fabs:_(s32) = nnan ninf G_FABS %lhs
+    %fmul:_(s32) = G_FMUL %fabs, %rhs
+    %copy_fmul:_(s32) = COPY %fmul
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLogNeg) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan ninf G_FABS %val
+    %fneg:_(s32) = nnan ninf G_FNEG %fabs
+    %flog:_(s32) = G_FLOG %fneg
+    %copy_flog:_(s32) = COPY %flog
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcFinite | fcNan | fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLogPosZero) {
+  StringRef MIRString = R"(
+    %val:_(s32) = G_FCONSTANT float 0.0
+    %flog:_(s32) = G_FLOG %val
+    %copy_flog:_(s32) = COPY %flog
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcFinite | fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLogNegZero) {
+  StringRef MIRString = R"(
+    %val:_(s32) = G_FCONSTANT float -0.0
+    %flog:_(s32) = G_FLOG %val
+    %copy_flog:_(s32) = COPY %flog
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcFinite | fcNegInf, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassCopy) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %copy:_(s32) = COPY %fabs
+    %copy_copy:_(s32) = COPY %copy
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassSelectIsFPClass) {
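+  // 96 == fcZero (fcNegZero | fcPosZero): the select picks %lhs only when it
+  // is some zero, and +0.0 otherwise, so the result is always a zero.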
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %lhs:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %rhs:_(s32) = G_FCONSTANT float 0.0
+    %cond:_(s1) = G_IS_FPCLASS %lhs, 96
+    %sel:_(s32) = G_SELECT %cond, %lhs, %rhs
+    %copy_sel:_(s32) = COPY %sel
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcZero, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFLDExp) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %exp:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = G_FABS %val
+    %fldexp:_(s32) = G_FLDEXP %fabs, %exp
+    %copy_fldexp:_(s32) = COPY %fldexp
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFPowIEvenExp) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %pwr:_(s32) = G_CONSTANT i32 2
+    %fpowi:_(s32) = G_FPOWI %val, %pwr
+    %copy_fpowi:_(s32) = COPY %fpowi
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFPowIPos) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %pwr:_(s32) = G_CONSTANT i32 3
+    %fabs:_(s32) = nnan ninf G_FABS %val
+    %fpowi:_(s32) = G_FPOWI %fabs, %pwr
+    %copy_fpowi:_(s32) = COPY %fpowi
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPositive | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFDiv) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fdiv:_(s32) = G_FDIV %val, %val
+    %copy_fdiv:_(s32) = COPY %fdiv
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosNormal | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassFRem) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %frem:_(s32) = G_FREM %val, %val
+    %copy_frem:_(s32) = COPY %frem
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcZero | fcNan, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+
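+// Illustrative sketch (not from the original patch): G_FPEXT should keep
+// everything except subnormals, since every half subnormal is a normal
+// float; the exact expectation here is an assumption.
+TEST_F(AArch64GISelMITest, TestFPClassFPExt) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val:_(s16) = G_LOAD %ptr(p0) :: (load (s16))
+    %ext:_(s32) = G_FPEXT %val
+    %copy_ext:_(s32) = COPY %ext
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcAllFlags & ~fcSubnormal, Known.KnownFPClasses);
+  EXPECT_EQ(std::nullopt, Known.SignBit);
+}
+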
+TEST_F(AArch64GISelMITest, TestFPClassShuffleVec) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
+    %fabs:_(<4 x s32>) = nnan ninf G_FABS %vec
+    %def:_(<4 x s32>) = G_IMPLICIT_DEF
+    %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %fabs(<4 x s32>), %def, shufflemask(0, 0, 0, 0)
+    %copy_shuf:_(<4 x s32>) = COPY %shuf
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassBuildVec) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %val1:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs:_(s32) = nnan ninf G_FABS %val1
+    %val2:_(s32) = G_FCONSTANT float 3.0
+    %vec:_(<2 x s32>) = G_BUILD_VECTOR %fabs, %val2
+    %copy_vec:_(<2 x s32>) = COPY %vec
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassConcatVec) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
+    %c1:_(s32) = G_FCONSTANT float 1.0
+    %c2:_(s32) = G_FCONSTANT float 2.0
+    %vec2:_(<2 x s32>) = G_BUILD_VECTOR %c1, %c2
+    %fabs1:_(<2 x s32>) = nnan ninf G_FABS %vec1
+    %fabs2:_(<2 x s32>) = nnan ninf G_FABS %vec2
+    %cat:_(<4 x s32>) = G_CONCAT_VECTORS %fabs1, %fabs2
+    %copy_cat:_(<4 x s32>) = COPY %cat
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassVecExtractElem) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
+    %fabs:_(<4 x s32>) = nnan ninf G_FABS %vec
+    %idx:_(s64) = G_CONSTANT i64 1
+    %extract:_(s32) = G_EXTRACT_VECTOR_ELT %fabs, %idx
+    %copy_elem:_(s32) = COPY %extract
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
+
+TEST_F(AArch64GISelMITest, TestFPClassVecInsertElem) {
+  StringRef MIRString = R"(
+    %ptr:_(p0) = G_IMPLICIT_DEF
+    %vec:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
+    %fabs1:_(<4 x s32>) = nnan ninf G_FABS %vec
+    %elem:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    %fabs2:_(s32) = nnan ninf G_FABS %elem
+    %idx:_(s64) = G_CONSTANT i64 1
+    %insert:_(<4 x s32>) = G_INSERT_VECTOR_ELT %fabs1, %fabs2, %idx
+    %copy_insert:_(<4 x s32>) = COPY %insert
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    GTEST_SKIP();
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  GISelValueTracking Info(*MF);
+
+  KnownFPClass Known = Info.computeKnownFPClass(SrcReg);
+
+  EXPECT_EQ(fcPosFinite, Known.KnownFPClasses);
+  EXPECT_EQ(false, Known.SignBit);
+}
\ No newline at end of file

>From 4cabeab721eb8bb78c16b1d43add40dcd0634ec8 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Mon, 7 Apr 2025 12:14:10 +0000
Subject: [PATCH 3/6] remove unused funcs

---
 llvm/include/llvm/CodeGen/GlobalISel/Utils.h | 15 -------------
 llvm/lib/CodeGen/GlobalISel/Utils.cpp        | 23 --------------------
 2 files changed, 38 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index f6101d5d589d2..4b8f577019d6e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -183,10 +183,6 @@ std::optional<APInt> getIConstantVRegVal(Register VReg,
 std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                                const MachineRegisterInfo &MRI);
 
-/// If \p VReg is defined by a G_CONSTANT fits in uint64_t returns it.
-std::optional<uint64_t> getIConstantVRegZExtVal(Register VReg,
-                                                const MachineRegisterInfo &MRI);
-
 /// \p VReg is defined by a G_CONSTANT, return the corresponding value.
 const APInt &getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI);
 
@@ -442,17 +438,6 @@ std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
 std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                 const MachineRegisterInfo &MRI);
 
-/// \returns the scalar sign extended integral splat value of \p Reg if
-/// possible.
-std::optional<uint64_t>
-getIConstantSplatZExtVal(const Register Reg, const MachineRegisterInfo &MRI);
-
-/// \returns the scalar sign extended integral splat value defined by \p MI if
-/// possible.
-std::optional<uint64_t>
-getIConstantSplatZExtVal(const MachineInstr &MI,
-                         const MachineRegisterInfo &MRI);
-
 /// Returns a floating point scalar constant of a build vector splat if it
 /// exists. When \p AllowUndef == true some elements can be undef but not all.
 std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index d37b4821da407..223d69c362185 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -318,14 +318,6 @@ llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
   return std::nullopt;
 }
 
-std::optional<uint64_t>
-llvm::getIConstantVRegZExtVal(Register VReg, const MachineRegisterInfo &MRI) {
-  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
-  if (Val && Val->getBitWidth() <= 64)
-    return Val->getZExtValue();
-  return std::nullopt;
-}
-
 namespace {
 
 // This function is used in many places, and as such, it has some
@@ -1437,21 +1429,6 @@ llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
 }
 
-std::optional<uint64_t>
-llvm::getIConstantSplatZExtVal(const Register Reg,
-                               const MachineRegisterInfo &MRI) {
-  if (auto SplatValAndReg =
-          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
-    return getIConstantVRegZExtVal(SplatValAndReg->VReg, MRI);
-  return std::nullopt;
-}
-
-std::optional<uint64_t>
-llvm::getIConstantSplatZExtVal(const MachineInstr &MI,
-                               const MachineRegisterInfo &MRI) {
-  return getIConstantSplatZExtVal(MI.getOperand(0).getReg(), MRI);
-}
-
 std::optional<FPValueAndVReg>
 llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
                         bool AllowUndef) {

>From a93a018af2d62a3c75b220320f21f4234ebc0453 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Mon, 7 Apr 2025 12:20:30 +0000
Subject: [PATCH 4/6] fix format

---
 llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
index 9428d1d1babf6..35ea98aa81d8e 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
@@ -2010,7 +2010,7 @@ void GISelValueTracking::computeKnownFPClass(Register R,
     break;
   }
   case TargetOpcode::G_SHUFFLE_VECTOR: {
-    // For undef elements, we don't know anything about the common state of
+    // For undefined elements, we don't know anything about the common state of
     // the shuffle result.
     GShuffleVector &Shuf = cast<GShuffleVector>(MI);
     APInt DemandedLHS, DemandedRHS;

>From fe33a0f1db8b7c47c86c4ef970a06e5d7fa45208 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Mon, 7 Apr 2025 17:21:04 +0200
Subject: [PATCH 5/6] Update
 llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h

Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
 llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 5387a88f385c1..31881171fd792 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -783,10 +783,7 @@ struct ClassifyOp_match {
 
     FPClassTest TmpClass =
         static_cast<FPClassTest>(TmpMI->getOperand(2).getImm());
-    if (T.match(MRI, TmpClass))
-      return true;
-
-    return false;
+    return T.match(MRI, TmpClass);
   }
 };
 

>From 10d90353b6ac23a9008b4bacf3210948e5b90528 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Mon, 7 Apr 2025 17:21:30 +0200
Subject: [PATCH 6/6] Update llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp

Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
 llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
index 35ea98aa81d8e..5382abd1fce1b 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
@@ -129,10 +129,10 @@ LLVM_ATTRIBUTE_UNUSED static void dumpKnownBitsResult(const MachineInstr &MI,
 LLVM_ATTRIBUTE_UNUSED static void
 dumpKnownFPClassResult(const MachineInstr &MI, const KnownFPClass &Known,
                        unsigned Depth) {
-  dbgs() << "[" << Depth << "] Compute known FP class: " << MI << "[" << Depth
-         << "] Computed for: " << MI << "[" << Depth
-         << "] KnownFPClasses: " << Known.KnownFPClasses << "\n"
-         << "[" << Depth << "] SignBit: " << Known.SignBit << "\n";
+  dbgs() << '[' << Depth << "] Compute known FP class: " << MI << '[' << Depth
+         << "] Computed for: " << MI << '[' << Depth
+         << "] KnownFPClasses: " << Known.KnownFPClasses << '\n'
+         << '[' << Depth << "] SignBit: " << Known.SignBit << '\n';
 }
 
 /// Compute known bits for the intersection of \p Src0 and \p Src1


