[llvm] [X86][GlobalIsel] Support IS_FP_CLASS intrinsic 1/4 (PR #148801)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 15 01:23:47 PDT 2025


https://github.com/mahesh-attarde updated https://github.com/llvm/llvm-project/pull/148801

>From 9039a1ef60b8f4752ce20bd0e8b8a2a58249fdb0 Mon Sep 17 00:00:00 2001
From: mattarde <mattarde at intel.com>
Date: Tue, 15 Jul 2025 00:48:12 -0700
Subject: [PATCH 1/2] [X86][GlobalIsel] Support IS_FP_CLASS intrinsic 1/4
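
This adds custom legalization of G_IS_FPCLASS for X86 GlobalISel. The
class test is expanded into integer compares on the value's bit
pattern, along the same lines as TargetLowering::expandIS_FPCLASS in
SelectionDAG, and getFltSemanticForLLT learns the 80-bit x87 format so
s80 values can be handled in later patches of this series.

As a rough sketch (not the exact generated MIR), the "qnan" test with
class mask 0x2 expands to:

  %bits = bitcast float %x to i32        ; reinterpret the bits
  %abs  = and i32 %bits, 2147483647      ; clear the sign bit (0x7FFFFFFF)
  %r    = icmp sge i32 %abs, 2143289344  ; abs(x) >= (Inf | quiet_bit), 0x7FC00000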

---
 llvm/lib/CodeGen/LowLevelTypeUtils.cpp        |   2 +
 .../lib/Target/X86/GISel/X86LegalizerInfo.cpp | 230 +++++++++++++
 llvm/lib/Target/X86/GISel/X86LegalizerInfo.h  |   6 +
 llvm/test/CodeGen/X86/fpclass.ll              | 323 ++++++++++++++++++
 4 files changed, 561 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/fpclass.ll

diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index 936c9fbb2fff0..226119384140e 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -80,6 +80,8 @@ const llvm::fltSemantics &llvm::getFltSemanticForLLT(LLT Ty) {
     return APFloat::IEEEsingle();
   case 64:
     return APFloat::IEEEdouble();
+  case 80:
+    return APFloat::x87DoubleExtended();
   case 128:
     return APFloat::IEEEquad();
   }
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index 7fe58539cd4ec..642104e447aac 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -13,6 +13,7 @@
 #include "X86LegalizerInfo.h"
 #include "X86Subtarget.h"
 #include "X86TargetMachine.h"
+#include "llvm/CodeGen/CodeGenCommonISel.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -579,6 +580,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
   getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
       .lower();
 
+  getActionDefinitionsBuilder(G_IS_FPCLASS).custom();
   // fp intrinsics
   getActionDefinitionsBuilder(G_INTRINSIC_ROUNDEVEN)
       .scalarize(0)
@@ -616,6 +618,8 @@ bool X86LegalizerInfo::legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI,
     return legalizeFPTOSI(MI, MRI, Helper);
   case TargetOpcode::G_GET_ROUNDING:
     return legalizeGETROUNDING(MI, MRI, Helper);
+  case TargetOpcode::G_IS_FPCLASS:
+    return legalizeIsFPClass(MI, MRI, Helper);
   }
   llvm_unreachable("expected switch to return");
 }
@@ -853,10 +857,236 @@ bool X86LegalizerInfo::legalizeGETROUNDING(MachineInstr &MI,
   auto RetValTrunc = MIRBuilder.buildZExtOrTrunc(DstTy, RetVal);
 
   MIRBuilder.buildCopy(Dst, RetValTrunc);
+  MI.eraseFromParent();
+  return true;
+}
+
+bool X86LegalizerInfo::expandFPClassTestForF32OrF64(
+    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
+  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  FPClassTest Test = static_cast<FPClassTest>(MI.getOperand(2).getImm());
+  assert(!SrcTy.isVector() && "G_IS_FPCLASS does not support vectors yet");
+  const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
+
+  // Some checks can be expressed as the inversion of a simpler check; for
+  // example, "inf|normal|subnormal|zero" => !"nan".
+  bool IsInverted = false;
+
+  if (FPClassTest InvertedCheck = invertFPClassTestIfSimpler(Test, false)) {
+    Test = InvertedCheck;
+    IsInverted = true;
+  }
+
+  // In the general case use integer operations.
+  unsigned BitSize = SrcTy.getScalarSizeInBits();
+  LLT IntVT = LLT::scalar(BitSize);
+  MachineInstrBuilder OpAsInt = MIRBuilder.buildCopy(IntVT, SrcReg);
+
+  // Various masks used by the class checks below.
+  APInt SignMask = APInt::getSignMask(BitSize);
+  APInt ValueMask = APInt::getSignedMaxValue(BitSize);
+  APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt();
+  APInt InfPlus1 = Inf + 1;
+  APInt ExpMask = Inf;
+  APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
+  APInt QNaNBitMask =
+      APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
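+  // For f32 these are: SignMask = 0x80000000, ValueMask = 0x7FFFFFFF,
+  // Inf/ExpMask = 0x7F800000, AllOneMantissa = 0x007FFFFF and
+  // QNaNBitMask = 0x00400000 (the top mantissa bit marks a quiet NaN).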
+
+  auto ValueMaskV = MIRBuilder.buildConstant(IntVT, ValueMask);
+  auto SignBitV = MIRBuilder.buildConstant(IntVT, SignMask);
+  auto ExpMaskV = MIRBuilder.buildConstant(IntVT, ExpMask);
+  auto ZeroV = MIRBuilder.buildConstant(IntVT, 0);
+  auto InfV = MIRBuilder.buildConstant(IntVT, Inf);
+  auto InfPlus1V = MIRBuilder.buildConstant(IntVT, InfPlus1);
+
+  MachineInstrBuilder Res;
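+  // OR each partial class check into the accumulated result.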
+  const auto appendResult = [&](MachineInstrBuilder &PartialRes) {
+    if (PartialRes.getInstr()) {
+      if (Res.getInstr()) {
+        Res = MIRBuilder.buildOr(DstTy, Res, PartialRes);
+      } else {
+        Res = PartialRes;
+      }
+    }
+  };
+  // Split the value into sign bit and absolute value.
+  auto AbsV = MIRBuilder.buildAnd(IntVT, OpAsInt, ValueMaskV);
+  auto SignVDestReg = MRI.createGenericVirtualRegister(LLT::scalar(1));
+  auto SignV =
+      MIRBuilder.buildICmp(CmpInst::ICMP_SLT, SignVDestReg, OpAsInt, ZeroV);
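+  // A signed compare against zero is true exactly when the sign bit is set.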
+
+  // Tests that involve more than one class should be processed first.
+  MachineInstrBuilder PartialRes;
+
+  if ((Test & fcFinite) == fcFinite) {
+    // finite(V) ==> abs(V) < exp_mask
+    PartialRes = MIRBuilder.buildICmp(
+        IsInverted ? CmpInst::ICMP_SGE : CmpInst::ICMP_SLT,
+        MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV, ExpMaskV);
+    Test &= ~fcFinite;
+  } else if ((Test & fcFinite) == fcPosFinite) {
+    // finite(V) && V > 0 ==> V < exp_mask
+    PartialRes = MIRBuilder.buildICmp(
+        IsInverted ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT,
+        MRI.createGenericVirtualRegister(LLT::scalar(1)), OpAsInt, ExpMaskV);
+    Test &= ~fcPosFinite;
+  } else if ((Test & fcFinite) == fcNegFinite) {
+    // finite(V) && V < 0 ==> abs(V) < exp_mask && signbit == 1
+    auto PartialResPart = MIRBuilder.buildICmp(
+        CmpInst::ICMP_SLT, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+        AbsV, ExpMaskV);
+    PartialRes = MIRBuilder.buildAnd(LLT::scalar(1), PartialResPart, SignV);
+    Test &= ~fcNegFinite;
+  }
+  appendResult(PartialRes);
+
+  if (FPClassTest PartialCheck = Test & (fcZero | fcSubnormal)) {
+    // fcZero | fcSubnormal => test all exponent bits are 0
+    // TODO: Handle sign bit specific cases
+    if (PartialCheck == (fcZero | fcSubnormal)) {
+      auto ExpBits = MIRBuilder.buildAnd(IntVT, OpAsInt, ExpMaskV);
+      auto ExpIsZero = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          ExpBits, ZeroV);
+      appendResult(ExpIsZero);
+      Test &= ~PartialCheck & fcAllFlags;
+    }
+  }
+
+  // Check for individual classes.
+  if (unsigned PartialCheck = Test & fcZero) {
+    if (PartialCheck == fcPosZero)
+      PartialRes = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          OpAsInt, ZeroV);
+    else if (PartialCheck == fcZero)
+      PartialRes = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          AbsV, ZeroV);
+    else // fcNegZero
+      PartialRes = MIRBuilder.buildICmp(
+          CmpInst::ICMP_EQ, MRI.createGenericVirtualRegister(LLT::scalar(1)),
+          OpAsInt, SignBitV);
+    appendResult(PartialRes);
+  }
+  // TODO: Subnormal checks are not implemented yet; report failure so the
+  // legalizer does not silently miscompile them.
+  if (Test & fcSubnormal)
+    return false;
+  if (unsigned PartialCheck = Test & fcInf) {
+    if (PartialCheck == fcPosInf)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), OpAsInt, InfV);
+    else if (PartialCheck == fcInf)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV, InfV);
+    else { // fcNegInf
+      APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
+      auto NegInfV = MIRBuilder.buildConstant(IntVT, NegInf);
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), OpAsInt, NegInfV);
+    }
+    appendResult(PartialRes);
+  }
+  if (unsigned PartialCheck = Test & fcNan) {
+    APInt InfWithQnanBit = Inf | QNaNBitMask;
+    auto InfWithQnanBitV = MIRBuilder.buildConstant(IntVT, InfWithQnanBit);
+    if (PartialCheck == fcNan) {
+      // isnan(V) ==> abs(V) > int(inf)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV, InfPlus1V);
+    } else if (PartialCheck == fcQNan) {
+      // isquiet(V) ==> abs(V) >= (unsigned(Inf) | quiet_bit)
+      PartialRes = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV,
+          InfWithQnanBitV);
+    } else { // fcSNan
+      // issignaling(V) ==> abs(V) > unsigned(Inf) &&
+      //                    abs(V) < (unsigned(Inf) | quiet_bit)
+      auto IsNotQnan = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_SGE : CmpInst::ICMP_SLT,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV,
+          InfWithQnanBitV);
+      auto IsNan = MIRBuilder.buildICmp(
+          IsInverted ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE,
+          MRI.createGenericVirtualRegister(LLT::scalar(1)), AbsV, InfPlus1V);
+      // For an inverted test, combine the inverted halves with OR (De Morgan).
+      PartialRes =
+          IsInverted ? MIRBuilder.buildOr(LLT::scalar(1), IsNan, IsNotQnan)
+                     : MIRBuilder.buildAnd(LLT::scalar(1), IsNan, IsNotQnan);
+    }
+    appendResult(PartialRes);
+  }
+  // TODO: Normal checks are not implemented yet; report failure for now.
+  if (Test & fcNormal)
+    return false;
+  // If no checks were generated, the result is a constant.
+  if (!Res.getInstr())
+    Res = MIRBuilder.buildConstant(LLT::scalar(1), IsInverted);
 
+  MIRBuilder.buildCopy(DstReg, Res);
   MI.eraseFromParent();
   return true;
 }
+
+bool X86LegalizerInfo::expandFPClassTestForF80(MachineInstr &MI,
+                                               MachineRegisterInfo &MRI,
+                                               LegalizerHelper &Helper) const {
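+  // TODO: Not implemented yet; returning false reports a legalization failure.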
+  return false;
+}
+
+bool X86LegalizerInfo::legalizeIsFPClass(MachineInstr &MI,
+                                         MachineRegisterInfo &MRI,
+                                         LegalizerHelper &Helper) const {
+  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  assert(!SrcTy.isVector() && "G_IS_FPCLASS does not support vectors yet");
+
+  FPClassTest Mask = static_cast<FPClassTest>(MI.getOperand(2).getImm());
+  if (Mask == fcNone) {
+    MIRBuilder.buildConstant(DstReg, 0);
+    MI.eraseFromParent();
+    return true;
+  }
+  if (Mask == fcAllFlags) {
+    MIRBuilder.buildConstant(DstReg, 1);
+    MI.eraseFromParent();
+    return true;
+  }
+  bool IsF80 = (SrcTy == LLT::scalar(80));
+  // For f32/f64/f80 if NoFpException is set, we can use the FCMP
+  // Some checks can be implemented using float comparisons, if floating point
+  // exceptions are ignored.
+
+  if (IsF80)
+    return expandFPClassTestForF80(MI, MRI, Helper);
+}
 
 bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
index 0003552d70ee0..107dd1c8af605 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
@@ -57,6 +57,12 @@ class X86LegalizerInfo : public LegalizerInfo {
 
   bool legalizeGETROUNDING(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LegalizerHelper &Helper) const;
+  bool expandFPClassTestForF32OrF64(MachineInstr &MI, MachineRegisterInfo &MRI,
+                                    LegalizerHelper &Helper) const;
+  bool expandFPClassTestForF80(MachineInstr &MI, MachineRegisterInfo &MRI,
+                               LegalizerHelper &Helper) const;
+  bool legalizeIsFPClass(MachineInstr &MI, MachineRegisterInfo &MRI,
+                         LegalizerHelper &Helper) const;
 };
 } // namespace llvm
 #endif
diff --git a/llvm/test/CodeGen/X86/fpclass.ll b/llvm/test/CodeGen/X86/fpclass.ll
new file mode 100644
index 0000000000000..80c3d579871aa
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fpclass.ll
@@ -0,0 +1,323 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefixes=X64
+; RUN: llc < %s -mtriple=i686-linux -global-isel | FileCheck %s -check-prefixes=X86,X86-GISEL
+; RUN: llc < %s -mtriple=x86_64-linux -global-isel | FileCheck %s -check-prefixes=X64,X64-GISEL
+
+; FIXME: We can reuse llvm/test/CodeGen/X86/is_fpclass.ll when all patches are included.
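+; Class mask bits follow FPClassTest in llvm/ADT/FloatingPointMode.h:
+; 0x001 = snan, 0x002 = qnan, 0x004 = -inf, 0x1c0 = +finite,
+; 0x200 = +inf, 0x3ff = all classes.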
+
+define i1 @is_fcNone_f32(float %x) nounwind {
+; X86-LABEL: is_fcNone_f32:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_fcNone_f32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
+entry:
+    %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 0)
+    ret i1 %0
+}
+
+define i1 @is_fcAllFlags_f32(float %x) nounwind {
+; X86-LABEL: is_fcAllFlags_f32:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_fcAllFlags_f32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movb $1, %al
+; X64-NEXT:    retq
+entry:
+    %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1023)
+    ret i1 %0
+}
+
+define i1 @issignaling_f(float %x) {
+; X64-LABEL: issignaling_f:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X64-NEXT:    setl %cl
+; X64-NEXT:    cmpl $2139095041, %eax # imm = 0x7F800001
+; X64-NEXT:    setge %al
+; X64-NEXT:    andb %cl, %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: issignaling_f:
+; X86-GISEL:       # %bb.0:
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-GISEL-NEXT:    setl %cl
+; X86-GISEL-NEXT:    cmpl $2139095041, %eax # imm = 0x7F800001
+; X86-GISEL-NEXT:    setge %al
+; X86-GISEL-NEXT:    andb %cl, %al
+; X86-GISEL-NEXT:    retl
+  %a0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1)  ; "snan"
+  ret i1 %a0
+}
+
+define i1 @isquiet_f(float %x) {
+; X64-LABEL: isquiet_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X64-NEXT:    setge %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: isquiet_f:
+; X86-GISEL:       # %bb.0: # %entry
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-GISEL-NEXT:    setge %al
+; X86-GISEL-NEXT:    retl
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 2)  ; "qnan"
+  ret i1 %0
+}
+
+define i1 @not_isquiet_f(float %x) {
+; X64-LABEL: not_isquiet_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X64-NEXT:    setl %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: not_isquiet_f:
+; X86-GISEL:       # %bb.0: # %entry
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-GISEL-NEXT:    setl %al
+; X86-GISEL-NEXT:    retl
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1021)  ; ~"qnan"
+  ret i1 %0
+}
+
+define i1 @isinf_f(float %x) {
+; X64-LABEL: isinf_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: isinf_f:
+; X86-GISEL:       # %bb.0: # %entry
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT:    sete %al
+; X86-GISEL-NEXT:    retl
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 516)  ; 0x204 = "inf"
+  ret i1 %0
+}
+
+define i1 @not_isinf_f(float %x) {
+; X64-LABEL: not_isinf_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: not_isinf_f:
+; X86-GISEL:       # %bb.0: # %entry
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT:    setne %al
+; X86-GISEL-NEXT:    retl
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 507)  ; ~0x204 = "~inf"
+  ret i1 %0
+}
+
+define i1 @is_plus_inf_f(float %x) {
+; X86-LABEL: is_plus_inf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_plus_inf_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 512)  ; 0x200 = "+inf"
+  ret i1 %0
+}
+
+define i1 @is_minus_inf_f(float %x) {
+; X86-LABEL: is_minus_inf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_minus_inf_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    cmpl $-8388608, %eax # imm = 0xFF800000
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 4)  ; "-inf"
+  ret i1 %0
+}
+
+define i1 @not_is_minus_inf_f(float %x) {
+; X86-LABEL: not_is_minus_inf_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: not_is_minus_inf_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    cmpl $-8388608, %eax # imm = 0xFF800000
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1019)  ; ~"-inf"
+  ret i1 %0
+}
+
+define i1 @isfinite_f(float %x) {
+; X64-LABEL: isfinite_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    setl %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: isfinite_f:
+; X86-GISEL:       # %bb.0: # %entry
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT:    setl %al
+; X86-GISEL-NEXT:    retl
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
+  ret i1 %0
+}
+
+define i1 @not_isfinite_f(float %x) {
+; X64-LABEL: not_isfinite_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    setge %al
+; X64-NEXT:    retq
+;
+; X86-GISEL-LABEL: not_isfinite_f:
+; X86-GISEL:       # %bb.0: # %entry
+; X86-GISEL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT:    setge %al
+; X86-GISEL-NEXT:    retl
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 519)  ; ~0x1f8 = "~finite"
+  ret i1 %0
+}
+
+define i1 @is_plus_finite_f(float %x) {
+; X86-LABEL: is_plus_finite_f:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_plus_finite_f:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 448)  ; 0x1c0 = "+finite"
+  ret i1 %0
+}
+
+define i1 @is_fcNone_f64(double %x) nounwind {
+; X86-LABEL: is_fcNone_f64:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_fcNone_f64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
+entry:
+    %0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 0)
+    ret i1 %0
+}
+
+define i1 @is_fcAllFlags_f64(double %x) nounwind {
+; X86-LABEL: is_fcAllFlags_f64:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_fcAllFlags_f64:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movb $1, %al
+; X64-NEXT:    retq
+entry:
+    %0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 1023)
+    ret i1 %0
+}
+
+define i1 @is_fcNone_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: is_fcNone_f80:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_fcNone_f80:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
+entry:
+    %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 0)
+    ret i1 %0
+}
+
+define i1 @is_fcAllFlags_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: is_fcAllFlags_f80:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: is_fcAllFlags_f80:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movb $1, %al
+; X64-NEXT:    retq
+entry:
+    %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 1023)
+    ret i1 %0
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64-GISEL: {{.*}}

>From 30b57ce2c483aae77b14329a04e8b70bbc5bf93b Mon Sep 17 00:00:00 2001
From: mattarde <mattarde at intel.com>
Date: Tue, 15 Jul 2025 01:23:32 -0700
Subject: [PATCH 2/2] Call the f32/f64 expansion from legalizeIsFPClass

---
 llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index 642104e447aac..344ff71bdd27a 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -1086,6 +1086,7 @@ bool X86LegalizerInfo::legalizeIsFPClass(MachineInstr &MI,
 
   if (IsF80)
     return expandFPClassTestForF80(MI, MRI, Helper);
+  return expandFPClassTestForF32OrF64(MI, MRI, Helper);
 }
 
 bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,


