[llvm] DAG: Handle lowering unordered compare with inf (PR #100378)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 21 23:17:45 PDT 2024


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/100378

>From 99b4eb607fc34ae87cd683fd379c0894e0112d02 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 21 Aug 2024 19:46:20 +0400
Subject: [PATCH 1/4] PPC: Custom lower ppcf128 is_fpclass if is_fpclass is
 custom

Unfortunately expandIS_FPCLASS is called directly in SelectionDAGBuilder
depending on whether IS_FPCLASS is custom or not. This helps avoid ppc test
regressions in a future patch where the custom lowering would be bypassed.
---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 459a96eca1ff20..a5bc24b55660a5 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1221,6 +1221,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
         setOperationAction(ISD::IS_FPCLASS, MVT::f32, Custom);
         setOperationAction(ISD::IS_FPCLASS, MVT::f64, Custom);
         setOperationAction(ISD::IS_FPCLASS, MVT::f128, Custom);
+        setOperationAction(ISD::IS_FPCLASS, MVT::ppcf128, Custom);
       }
 
       // 128 bit shifts can be accomplished via 3 instructions for SHL and
@@ -11479,6 +11480,12 @@ SDValue PPCTargetLowering::LowerIS_FPCLASS(SDValue Op,
   uint64_t RHSC = Op.getConstantOperandVal(1);
   SDLoc Dl(Op);
   FPClassTest Category = static_cast<FPClassTest>(RHSC);
+  if (LHS.getValueType() == MVT::ppcf128) {
+    // The higher part determines the value class.
+    LHS = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::f64, LHS,
+                      DAG.getConstant(1, Dl, MVT::i32));
+  }
+
   return getDataClassTest(LHS, Category, Dl, DAG, Subtarget);
 }
 

>From b57fb07c93a8052805110626786a8e242213c983 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 21 Aug 2024 20:15:55 +0400
Subject: [PATCH 2/4] DAG: Check if is_fpclass is custom, instead of
 isLegalOrCustom

For some reason, isOperationLegalOrCustom is not the same as
isOperationLegal || isOperationCustom. Unfortunately, it checks
if the type is legal, which makes it useless for custom lowering
on non-legal types (which is always ppcf128).

Really the DAG builder shouldn't be going to expand this in the
builder, it makes it difficult to work with. It's only here to work
around the DAG requiring legal integer types the same size as
the FP type after type legalization.
---
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   3 +-
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  17 +-
 llvm/test/CodeGen/AMDGPU/fract-match.ll       |  10 +-
 .../CodeGen/AMDGPU/llvm.is.fpclass.f16.ll     | 205 +++++++++++-------
 llvm/test/CodeGen/PowerPC/is_fpclass.ll       |  37 ++--
 5 files changed, 160 insertions(+), 112 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 60dcb118542785..09a3def6586493 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7032,7 +7032,8 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
     // expansion can use illegal types. Making expansion early allows
     // legalizing these types prior to selection.
-    if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
+    if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
+        !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
       setValue(&I, Result);
       return;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e57c8f8b7b4835..866e04bcc7fb2d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -426,12 +426,17 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   // FIXME: These IS_FPCLASS vector fp types are marked custom so it reaches
   // scalarization code. Can be removed when IS_FPCLASS expand isn't called by
   // default unless marked custom/legal.
-  setOperationAction(
-      ISD::IS_FPCLASS,
-      {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16, MVT::v2f32, MVT::v3f32,
-       MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
-       MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64, MVT::v16f64},
-      Custom);
+  setOperationAction(ISD::IS_FPCLASS,
+                     {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
+                      MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
+                      MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64,
+                      MVT::v16f64},
+                     Custom);
+
+  if (isTypeLegal(MVT::f16))
+    setOperationAction(ISD::IS_FPCLASS,
+                       {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16},
+                       Custom);
 
   // Expand to fneg + fadd.
   setOperationAction(ISD::FSUB, MVT::f64, Expand);
diff --git a/llvm/test/CodeGen/AMDGPU/fract-match.ll b/llvm/test/CodeGen/AMDGPU/fract-match.ll
index 1b28ddb2c58620..b212b9caf8400e 100644
--- a/llvm/test/CodeGen/AMDGPU/fract-match.ll
+++ b/llvm/test/CodeGen/AMDGPU/fract-match.ll
@@ -2135,16 +2135,16 @@ define <2 x half> @safe_math_fract_v2f16(<2 x half> %x, ptr addrspace(1) nocaptu
 ; GFX8-LABEL: safe_math_fract_v2f16:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v6, 0x204
+; GFX8-NEXT:    s_movk_i32 s6, 0x204
 ; GFX8-NEXT:    v_floor_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_floor_f16_e32 v4, v0
-; GFX8-NEXT:    v_cmp_class_f16_sdwa s[4:5], v0, v6 src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_fract_f16_sdwa v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_cmp_class_f16_sdwa s[4:5], v0, s6 src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_pack_b32_f16 v3, v4, v3
 ; GFX8-NEXT:    v_fract_f16_e32 v4, v0
-; GFX8-NEXT:    v_fract_f16_sdwa v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX8-NEXT:    v_cmp_class_f16_e32 vcc, v0, v6
 ; GFX8-NEXT:    v_cndmask_b32_e64 v5, v5, 0, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, s6
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, 0, s[4:5]
 ; GFX8-NEXT:    v_pack_b32_f16 v0, v0, v5
 ; GFX8-NEXT:    global_store_dword v[1:2], v3, off
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll
index 9c248bd6e8b2aa..3d8e9e60973053 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll
@@ -959,47 +959,86 @@ define <2 x i1> @isnan_v2f16(<2 x half> %x) nounwind {
 ; GFX7GLISEL-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
 ; GFX7GLISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX8CHECK-LABEL: isnan_v2f16:
-; GFX8CHECK:       ; %bb.0:
-; GFX8CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8CHECK-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; GFX8CHECK-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, 3
-; GFX8CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; GFX8CHECK-NEXT:    v_cmp_class_f16_e64 s[4:5], v1, 3
-; GFX8CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
-; GFX8CHECK-NEXT:    s_setpc_b64 s[30:31]
+; GFX8SELDAG-LABEL: isnan_v2f16:
+; GFX8SELDAG:       ; %bb.0:
+; GFX8SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8SELDAG-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v1, v1
+; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v0, v0
+; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8SELDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX9CHECK-LABEL: isnan_v2f16:
-; GFX9CHECK:       ; %bb.0:
-; GFX9CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9CHECK-NEXT:    v_mov_b32_e32 v1, 3
-; GFX9CHECK-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, 3
-; GFX9CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
-; GFX9CHECK-NEXT:    v_cmp_class_f16_sdwa s[4:5], v0, v1 src0_sel:WORD_1 src1_sel:DWORD
-; GFX9CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
-; GFX9CHECK-NEXT:    v_mov_b32_e32 v0, v2
-; GFX9CHECK-NEXT:    s_setpc_b64 s[30:31]
+; GFX8GLISEL-LABEL: isnan_v2f16:
+; GFX8GLISEL:       ; %bb.0:
+; GFX8GLISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GLISEL-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8GLISEL-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, 3
+; GFX8GLISEL-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8GLISEL-NEXT:    v_cmp_class_f16_e64 s[4:5], v1, 3
+; GFX8GLISEL-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8GLISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10CHECK-LABEL: isnan_v2f16:
-; GFX10CHECK:       ; %bb.0:
-; GFX10CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10CHECK-NEXT:    v_mov_b32_e32 v1, 3
-; GFX10CHECK-NEXT:    v_cmp_class_f16_e64 s4, v0, 3
-; GFX10CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s4
-; GFX10CHECK-NEXT:    v_cmp_class_f16_sdwa s4, v0, v1 src0_sel:WORD_1 src1_sel:DWORD
-; GFX10CHECK-NEXT:    v_mov_b32_e32 v0, v2
-; GFX10CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s4
-; GFX10CHECK-NEXT:    s_setpc_b64 s[30:31]
+; GFX9SELDAG-LABEL: isnan_v2f16:
+; GFX9SELDAG:       ; %bb.0:
+; GFX9SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9SELDAG-NEXT:    v_cmp_u_f16_sdwa s[4:5], v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v0, v0
+; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9SELDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11CHECK-LABEL: isnan_v2f16:
-; GFX11CHECK:       ; %bb.0:
-; GFX11CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11CHECK-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; GFX11CHECK-NEXT:    v_cmp_class_f16_e64 s0, v0, 3
-; GFX11CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11CHECK-NEXT:    v_cmp_class_f16_e64 s0, v1, 3
-; GFX11CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
-; GFX11CHECK-NEXT:    s_setpc_b64 s[30:31]
+; GFX9GLISEL-LABEL: isnan_v2f16:
+; GFX9GLISEL:       ; %bb.0:
+; GFX9GLISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9GLISEL-NEXT:    v_mov_b32_e32 v1, 3
+; GFX9GLISEL-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, 3
+; GFX9GLISEL-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX9GLISEL-NEXT:    v_cmp_class_f16_sdwa s[4:5], v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9GLISEL-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9GLISEL-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9GLISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10SELDAG-LABEL: isnan_v2f16:
+; GFX10SELDAG:       ; %bb.0:
+; GFX10SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v0, v0
+; GFX10SELDAG-NEXT:    v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s4
+; GFX10SELDAG-NEXT:    v_mov_b32_e32 v0, v2
+; GFX10SELDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10GLISEL-LABEL: isnan_v2f16:
+; GFX10GLISEL:       ; %bb.0:
+; GFX10GLISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10GLISEL-NEXT:    v_mov_b32_e32 v1, 3
+; GFX10GLISEL-NEXT:    v_cmp_class_f16_e64 s4, v0, 3
+; GFX10GLISEL-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s4
+; GFX10GLISEL-NEXT:    v_cmp_class_f16_sdwa s4, v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+; GFX10GLISEL-NEXT:    v_mov_b32_e32 v0, v2
+; GFX10GLISEL-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s4
+; GFX10GLISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11SELDAG-LABEL: isnan_v2f16:
+; GFX11SELDAG:       ; %bb.0:
+; GFX11SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11SELDAG-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX11SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v0, v0
+; GFX11SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v1, v1
+; GFX11SELDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11SELDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11GLISEL-LABEL: isnan_v2f16:
+; GFX11GLISEL:       ; %bb.0:
+; GFX11GLISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11GLISEL-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX11GLISEL-NEXT:    v_cmp_class_f16_e64 s0, v0, 3
+; GFX11GLISEL-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11GLISEL-NEXT:    v_cmp_class_f16_e64 s0, v1, 3
+; GFX11GLISEL-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; GFX11GLISEL-NEXT:    s_setpc_b64 s[30:31]
   %1 = call <2 x i1> @llvm.is.fpclass.v2f16(<2 x half> %x, i32 3)  ; nan
   ret <2 x i1> %1
 }
@@ -1196,16 +1235,17 @@ define <4 x i1> @isnan_v4f16(<4 x half> %x) nounwind {
 ; GFX8SELDAG-LABEL: isnan_v4f16:
 ; GFX8SELDAG:       ; %bb.0:
 ; GFX8SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8SELDAG-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, 3
 ; GFX8SELDAG-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; GFX8SELDAG-NEXT:    v_cmp_class_f16_e64 s[4:5], v1, 3
-; GFX8SELDAG-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
-; GFX8SELDAG-NEXT:    v_cmp_class_f16_e64 s[4:5], v3, 3
-; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
-; GFX8SELDAG-NEXT:    v_cmp_class_f16_e64 s[4:5], v4, 3
-; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
+; GFX8SELDAG-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX8SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v3, v3
+; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v2, v2
+; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX8SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v0, v0
+; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v1, v1
+; GFX8SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8SELDAG-NEXT:    v_mov_b32_e32 v1, v4
 ; GFX8SELDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8GLISEL-LABEL: isnan_v4f16:
@@ -1227,16 +1267,14 @@ define <4 x i1> @isnan_v4f16(<4 x half> %x) nounwind {
 ; GFX9SELDAG-LABEL: isnan_v4f16:
 ; GFX9SELDAG:       ; %bb.0:
 ; GFX9SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9SELDAG-NEXT:    v_cmp_class_f16_e64 s[4:5], v0, 3
-; GFX9SELDAG-NEXT:    v_mov_b32_e32 v3, 3
-; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s[4:5]
-; GFX9SELDAG-NEXT:    v_cmp_class_f16_e64 s[4:5], v1, 3
-; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
-; GFX9SELDAG-NEXT:    v_cmp_class_f16_sdwa s[4:5], v0, v3 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9SELDAG-NEXT:    v_cmp_u_f16_sdwa s[4:5], v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v0, v0
 ; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[4:5]
-; GFX9SELDAG-NEXT:    v_cmp_class_f16_sdwa s[4:5], v1, v3 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9SELDAG-NEXT:    v_cmp_u_f16_sdwa s[4:5], v1, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9SELDAG-NEXT:    v_cmp_u_f16_e32 vcc, v1, v1
 ; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
-; GFX9SELDAG-NEXT:    v_mov_b32_e32 v0, v5
+; GFX9SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; GFX9SELDAG-NEXT:    v_mov_b32_e32 v1, v4
 ; GFX9SELDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1259,16 +1297,14 @@ define <4 x i1> @isnan_v4f16(<4 x half> %x) nounwind {
 ; GFX10SELDAG-LABEL: isnan_v4f16:
 ; GFX10SELDAG:       ; %bb.0:
 ; GFX10SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10SELDAG-NEXT:    v_cmp_class_f16_e64 s4, v0, 3
-; GFX10SELDAG-NEXT:    v_mov_b32_e32 v3, 3
-; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s4
-; GFX10SELDAG-NEXT:    v_cmp_class_f16_e64 s4, v1, 3
-; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s4
-; GFX10SELDAG-NEXT:    v_cmp_class_f16_sdwa s4, v0, v3 src0_sel:WORD_1 src1_sel:DWORD
-; GFX10SELDAG-NEXT:    v_mov_b32_e32 v0, v5
+; GFX10SELDAG-NEXT:    v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v0, v0
 ; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s4
-; GFX10SELDAG-NEXT:    v_cmp_class_f16_sdwa s4, v1, v3 src0_sel:WORD_1 src1_sel:DWORD
+; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v1, v1
+; GFX10SELDAG-NEXT:    v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:WORD_1 src1_sel:WORD_1
 ; GFX10SELDAG-NEXT:    v_mov_b32_e32 v1, v4
+; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
 ; GFX10SELDAG-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX10SELDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1288,20 +1324,35 @@ define <4 x i1> @isnan_v4f16(<4 x half> %x) nounwind {
 ; GFX10GLISEL-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX10GLISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11CHECK-LABEL: isnan_v4f16:
-; GFX11CHECK:       ; %bb.0:
-; GFX11CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11CHECK-NEXT:    v_cmp_class_f16_e64 s0, v0, 3
-; GFX11CHECK-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GFX11CHECK-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX11CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11CHECK-NEXT:    v_cmp_class_f16_e64 s0, v1, 3
-; GFX11CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
-; GFX11CHECK-NEXT:    v_cmp_class_f16_e64 s0, v3, 3
-; GFX11CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
-; GFX11CHECK-NEXT:    v_cmp_class_f16_e64 s0, v4, 3
-; GFX11CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
-; GFX11CHECK-NEXT:    s_setpc_b64 s[30:31]
+; GFX11SELDAG-LABEL: isnan_v4f16:
+; GFX11SELDAG:       ; %bb.0:
+; GFX11SELDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v0, v0
+; GFX11SELDAG-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
+; GFX11SELDAG-NEXT:    v_lshrrev_b32_e32 v3, 16, v1
+; GFX11SELDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v1, v1
+; GFX11SELDAG-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX11SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v4, v4
+; GFX11SELDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11SELDAG-NEXT:    v_cmp_u_f16_e32 vcc_lo, v3, v3
+; GFX11SELDAG-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11SELDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11GLISEL-LABEL: isnan_v4f16:
+; GFX11GLISEL:       ; %bb.0:
+; GFX11GLISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11GLISEL-NEXT:    v_cmp_class_f16_e64 s0, v0, 3
+; GFX11GLISEL-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX11GLISEL-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
+; GFX11GLISEL-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11GLISEL-NEXT:    v_cmp_class_f16_e64 s0, v1, 3
+; GFX11GLISEL-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; GFX11GLISEL-NEXT:    v_cmp_class_f16_e64 s0, v3, 3
+; GFX11GLISEL-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; GFX11GLISEL-NEXT:    v_cmp_class_f16_e64 s0, v4, 3
+; GFX11GLISEL-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
+; GFX11GLISEL-NEXT:    s_setpc_b64 s[30:31]
   %1 = call <4 x i1> @llvm.is.fpclass.v4f16(<4 x half> %x, i32 3)  ; nan
   ret <4 x i1> %1
 }
diff --git a/llvm/test/CodeGen/PowerPC/is_fpclass.ll b/llvm/test/CodeGen/PowerPC/is_fpclass.ll
index 57f457553a5407..f88e23fe105f76 100644
--- a/llvm/test/CodeGen/PowerPC/is_fpclass.ll
+++ b/llvm/test/CodeGen/PowerPC/is_fpclass.ll
@@ -29,10 +29,10 @@ define i1 @isnan_double(double %x) nounwind {
 define i1 @isnan_ppc_fp128(ppc_fp128 %x) nounwind {
 ; CHECK-LABEL: isnan_ppc_fp128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    xststdcdp 0, 1, 64
 ; CHECK-NEXT:    li 3, 0
 ; CHECK-NEXT:    li 4, 1
-; CHECK-NEXT:    isel 3, 4, 3, 3
+; CHECK-NEXT:    iseleq 3, 4, 3
 ; CHECK-NEXT:    blr
   %1 = call i1 @llvm.is.fpclass.ppcf128(ppc_fp128 %x, i32 3)  ; nan
   ret i1 %1
@@ -77,14 +77,10 @@ define i1 @isnan_double_strictfp(double %x) strictfp nounwind {
 define i1 @isnan_ppc_fp128_strictfp(ppc_fp128 %x) strictfp nounwind {
 ; CHECK-LABEL: isnan_ppc_fp128_strictfp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mffprd 3, 1
-; CHECK-NEXT:    li 4, 2047
-; CHECK-NEXT:    clrldi 3, 3, 1
-; CHECK-NEXT:    rldic 4, 4, 52, 1
-; CHECK-NEXT:    cmpd 3, 4
+; CHECK-NEXT:    xststdcdp 0, 1, 64
 ; CHECK-NEXT:    li 3, 0
 ; CHECK-NEXT:    li 4, 1
-; CHECK-NEXT:    iselgt 3, 4, 3
+; CHECK-NEXT:    iseleq 3, 4, 3
 ; CHECK-NEXT:    blr
   %1 = call i1 @llvm.is.fpclass.ppcf128(ppc_fp128 %x, i32 3) strictfp ; nan
   ret i1 %1
@@ -279,15 +275,11 @@ define i1 @issnan_ppc_fp128(ppc_fp128 %x) nounwind {
 ; CHECK-LABEL: issnan_ppc_fp128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mffprd 3, 1
-; CHECK-NEXT:    li 4, 4095
-; CHECK-NEXT:    clrldi 3, 3, 1
-; CHECK-NEXT:    rldic 4, 4, 51, 1
-; CHECK-NEXT:    cmpd 3, 4
-; CHECK-NEXT:    li 4, 2047
-; CHECK-NEXT:    rldic 4, 4, 52, 1
-; CHECK-NEXT:    cmpd 1, 3, 4
+; CHECK-NEXT:    xststdcdp 1, 1, 64
+; CHECK-NEXT:    rldicl 3, 3, 32, 32
+; CHECK-NEXT:    andis. 3, 3, 8
 ; CHECK-NEXT:    li 3, 1
-; CHECK-NEXT:    crnand 20, 5, 0
+; CHECK-NEXT:    crnand 20, 6, 2
 ; CHECK-NEXT:    isel 3, 0, 3, 20
 ; CHECK-NEXT:    blr
   %1 = call i1 @llvm.is.fpclass.ppcf128(ppc_fp128 %x, i32 1)
@@ -345,13 +337,12 @@ define i1 @isqnan_ppc_fp128(ppc_fp128 %x) nounwind {
 ; CHECK-LABEL: isqnan_ppc_fp128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mffprd 3, 1
-; CHECK-NEXT:    li 4, -17
-; CHECK-NEXT:    clrldi 3, 3, 1
-; CHECK-NEXT:    rldicl 4, 4, 47, 1
-; CHECK-NEXT:    cmpd 3, 4
-; CHECK-NEXT:    li 3, 0
-; CHECK-NEXT:    li 4, 1
-; CHECK-NEXT:    iselgt 3, 4, 3
+; CHECK-NEXT:    xststdcdp 1, 1, 64
+; CHECK-NEXT:    rldicl 3, 3, 13, 51
+; CHECK-NEXT:    andi. 3, 3, 1
+; CHECK-NEXT:    li 3, 1
+; CHECK-NEXT:    crnand 20, 6, 1
+; CHECK-NEXT:    isel 3, 0, 3, 20
 ; CHECK-NEXT:    blr
   %1 = call i1 @llvm.is.fpclass.ppcf128(ppc_fp128 %x, i32 2)
   ret i1 %1

>From 7ec4e66697b2a1b9a5f33b8fda3b5fcb26b761f8 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 7 Feb 2023 12:22:05 -0400
Subject: [PATCH 3/4] DAG: Handle lowering unordered compare with inf

Try to take advantage of the nan check behavior of fcmp.
x86_64 looks better, x86_32 looks worse.
---
 llvm/include/llvm/CodeGen/CodeGenCommonISel.h |  7 +-
 llvm/lib/CodeGen/CodeGenCommonISel.cpp        |  8 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   | 53 +++++++------
 llvm/test/CodeGen/X86/is_fpclass.ll           | 78 +++++++++----------
 4 files changed, 83 insertions(+), 63 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/CodeGenCommonISel.h b/llvm/include/llvm/CodeGen/CodeGenCommonISel.h
index 90ef890f22d1b1..e4b2e20babc07a 100644
--- a/llvm/include/llvm/CodeGen/CodeGenCommonISel.h
+++ b/llvm/include/llvm/CodeGen/CodeGenCommonISel.h
@@ -218,10 +218,15 @@ findSplitPointForStackProtector(MachineBasicBlock *BB,
 /// Evaluates if the specified FP class test is better performed as the inverse
 /// (i.e. fewer instructions should be required to lower it).  An example is the
 /// test "inf|normal|subnormal|zero", which is an inversion of "nan".
+///
 /// \param Test The test as specified in 'is_fpclass' intrinsic invocation.
+///
+/// \param UseFCmp The intention is to perform the comparison using
+/// floating-point compare instructions which check for nan.
+///
 /// \returns The inverted test, or fcNone, if inversion does not produce a
 /// simpler test.
-FPClassTest invertFPClassTestIfSimpler(FPClassTest Test);
+FPClassTest invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp);
 
 /// Assuming the instruction \p MI is going to be deleted, attempt to salvage
 /// debug users of \p MI by writing the effect of \p MI in a DIExpression.
diff --git a/llvm/lib/CodeGen/CodeGenCommonISel.cpp b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
index fe144d3c182039..d985751e2be0be 100644
--- a/llvm/lib/CodeGen/CodeGenCommonISel.cpp
+++ b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
@@ -173,8 +173,9 @@ llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
   return SplitPoint;
 }
 
-FPClassTest llvm::invertFPClassTestIfSimpler(FPClassTest Test) {
+FPClassTest llvm::invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp) {
   FPClassTest InvertedTest = ~Test;
+
   // Pick the direction with fewer tests
   // TODO: Handle more combinations of cases that can be handled together
   switch (static_cast<unsigned>(InvertedTest)) {
@@ -200,6 +201,11 @@ FPClassTest llvm::invertFPClassTestIfSimpler(FPClassTest Test) {
   case fcSubnormal | fcZero:
   case fcSubnormal | fcZero | fcNan:
     return InvertedTest;
+  case fcInf | fcNan:
+    // If we're trying to use fcmp, we can take advantage of the nan check
+    // behavior of the compare (but this is more instructions in the integer
+    // expansion).
+    return UseFCmp ? InvertedTest : fcNone;
   default:
     return fcNone;
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 4e796289cff0a1..1e3a0da0f3be5b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8672,7 +8672,7 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
   // Degenerated cases.
   if (Test == fcNone)
     return DAG.getBoolConstant(false, DL, ResultVT, OperandVT);
-  if ((Test & fcAllFlags) == fcAllFlags)
+  if (Test == fcAllFlags)
     return DAG.getBoolConstant(true, DL, ResultVT, OperandVT);
 
   // PPC double double is a pair of doubles, of which the higher part determines
@@ -8683,14 +8683,6 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
     OperandVT = MVT::f64;
   }
 
-  // Some checks may be represented as inversion of simpler check, for example
-  // "inf|normal|subnormal|zero" => !"nan".
-  bool IsInverted = false;
-  if (FPClassTest InvertedCheck = invertFPClassTestIfSimpler(Test)) {
-    IsInverted = true;
-    Test = InvertedCheck;
-  }
-
   // Floating-point type properties.
   EVT ScalarFloatVT = OperandVT.getScalarType();
   const Type *FloatTy = ScalarFloatVT.getTypeForEVT(*DAG.getContext());
@@ -8702,9 +8694,16 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
   if (Flags.hasNoFPExcept() &&
       isOperationLegalOrCustom(ISD::SETCC, OperandVT.getScalarType())) {
     FPClassTest FPTestMask = Test;
+    bool IsInvertedFP = false;
+
+    if (FPClassTest InvertedFPCheck =
+            invertFPClassTestIfSimpler(FPTestMask, true)) {
+      FPTestMask = InvertedFPCheck;
+      IsInvertedFP = true;
+    }
 
-    ISD::CondCode OrderedCmpOpcode = IsInverted ? ISD::SETUNE : ISD::SETOEQ;
-    ISD::CondCode UnorderedCmpOpcode = IsInverted ? ISD::SETONE : ISD::SETUEQ;
+    ISD::CondCode OrderedCmpOpcode = IsInvertedFP ? ISD::SETUNE : ISD::SETOEQ;
+    ISD::CondCode UnorderedCmpOpcode = IsInvertedFP ? ISD::SETONE : ISD::SETUEQ;
 
     // See if we can fold an | fcNan into an unordered compare.
     FPClassTest OrderedFPTestMask = FPTestMask & ~fcNan;
@@ -8717,7 +8716,7 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
     const bool IsOrdered = FPTestMask == OrderedFPTestMask;
 
     if (std::optional<bool> IsCmp0 =
-            isFCmpEqualZero(Test, Semantics, DAG.getMachineFunction());
+            isFCmpEqualZero(FPTestMask, Semantics, DAG.getMachineFunction());
         IsCmp0 && (isCondCodeLegalOrCustom(
                       *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
                       OperandVT.getScalarType().getSimpleVT()))) {
@@ -8729,15 +8728,16 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
                           *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
     }
 
-    if (Test == fcNan &&
-        isCondCodeLegalOrCustom(IsInverted ? ISD::SETO : ISD::SETUO,
-                                OperandVT.getScalarType().getSimpleVT())) {
+    if (FPTestMask == fcNan &&
+        isCondCodeLegalOrCustom(IsInvertedFP ? ISD::SETO : ISD::SETUO,
+                                OperandVT.getScalarType().getSimpleVT()))
       return DAG.getSetCC(DL, ResultVT, Op, Op,
-                          IsInverted ? ISD::SETO : ISD::SETUO);
-    }
+                          IsInvertedFP ? ISD::SETO : ISD::SETUO);
 
-    if (Test == fcInf &&
-        isCondCodeLegalOrCustom(IsInverted ? ISD::SETUNE : ISD::SETOEQ,
+    bool IsOrderedInf = FPTestMask == fcInf;
+    if ((FPTestMask == fcInf || FPTestMask == (fcInf | fcNan)) &&
+        isCondCodeLegalOrCustom(IsOrderedInf ? OrderedCmpOpcode
+                                             : UnorderedCmpOpcode,
                                 OperandVT.getScalarType().getSimpleVT()) &&
         isOperationLegalOrCustom(ISD::FABS, OperandVT.getScalarType())) {
       // isinf(x) --> fabs(x) == inf
@@ -8745,15 +8745,15 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
       SDValue Inf =
           DAG.getConstantFP(APFloat::getInf(Semantics), DL, OperandVT);
       return DAG.getSetCC(DL, ResultVT, Abs, Inf,
-                          IsInverted ? ISD::SETUNE : ISD::SETOEQ);
+                          IsOrderedInf ? OrderedCmpOpcode : UnorderedCmpOpcode);
     }
 
     if (OrderedFPTestMask == (fcSubnormal | fcZero) && !IsOrdered) {
       // TODO: Could handle ordered case, but it produces worse code for
       // x86. Maybe handle ordered if fabs is free?
 
-      ISD::CondCode OrderedOp = IsInverted ? ISD::SETUGE : ISD::SETOLT;
-      ISD::CondCode UnorderedOp = IsInverted ? ISD::SETOGE : ISD::SETULT;
+      ISD::CondCode OrderedOp = IsInvertedFP ? ISD::SETUGE : ISD::SETOLT;
+      ISD::CondCode UnorderedOp = IsInvertedFP ? ISD::SETOGE : ISD::SETULT;
 
       if (isCondCodeLegalOrCustom(IsOrdered ? OrderedOp : UnorderedOp,
                                   OperandVT.getScalarType().getSimpleVT())) {
@@ -8770,6 +8770,15 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
     }
   }
 
+  // Some checks may be represented as inversion of simpler check, for example
+  // "inf|normal|subnormal|zero" => !"nan".
+  bool IsInverted = false;
+
+  if (FPClassTest InvertedCheck = invertFPClassTestIfSimpler(Test, false)) {
+    Test = InvertedCheck;
+    IsInverted = true;
+  }
+
   // In the general case use integer operations.
   unsigned BitSize = OperandVT.getScalarSizeInBits();
   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), BitSize);
diff --git a/llvm/test/CodeGen/X86/is_fpclass.ll b/llvm/test/CodeGen/X86/is_fpclass.ll
index cc4d4c4543a515..4e46abbdb01c34 100644
--- a/llvm/test/CodeGen/X86/is_fpclass.ll
+++ b/llvm/test/CodeGen/X86/is_fpclass.ll
@@ -240,18 +240,22 @@ entry:
 define i1 @isfinite_f(float %x) {
 ; X86-LABEL: isfinite_f:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-NEXT:    setl %al
+; X86-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NEXT:    fabs
+; X86-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NEXT:    fxch %st(1)
+; X86-NEXT:    fucompp
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: isfinite_f:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    movd %xmm0, %eax
-; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
-; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
-; X64-NEXT:    setl %al
+; X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
@@ -1150,31 +1154,23 @@ entry:
 define i1 @isfinite_d(double %x) {
 ; X86-LABEL: isfinite_d:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    cmpl $2146435072, %eax # imm = 0x7FF00000
-; X86-NEXT:    setl %al
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NEXT:    fabs
+; X86-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NEXT:    fxch %st(1)
+; X86-NEXT:    fucompp
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
-; X64-GENERIC-LABEL: isfinite_d:
-; X64-GENERIC:       # %bb.0: # %entry
-; X64-GENERIC-NEXT:    movq %xmm0, %rax
-; X64-GENERIC-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
-; X64-GENERIC-NEXT:    andq %rax, %rcx
-; X64-GENERIC-NEXT:    movabsq $9218868437227405312, %rax # imm = 0x7FF0000000000000
-; X64-GENERIC-NEXT:    cmpq %rax, %rcx
-; X64-GENERIC-NEXT:    setl %al
-; X64-GENERIC-NEXT:    retq
-;
-; X64-NDD-LABEL: isfinite_d:
-; X64-NDD:       # %bb.0: # %entry
-; X64-NDD-NEXT:    movq %xmm0, %rax
-; X64-NDD-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
-; X64-NDD-NEXT:    andq %rcx, %rax
-; X64-NDD-NEXT:    movabsq $9218868437227405312, %rcx # imm = 0x7FF0000000000000
-; X64-NDD-NEXT:    cmpq %rcx, %rax
-; X64-NDD-NEXT:    setl %al
-; X64-NDD-NEXT:    retq
+; X64-LABEL: isfinite_d:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    andpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 504)  ; 0x1f8 = "finite"
   ret i1 %0
@@ -2053,18 +2049,22 @@ entry:
 define i1 @not_isinf_or_nan_f(float %x) {
 ; X86-LABEL: not_isinf_or_nan_f:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-NEXT:    setl %al
+; X86-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NEXT:    fabs
+; X86-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NEXT:    fxch %st(1)
+; X86-NEXT:    fucompp
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: not_isinf_or_nan_f:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    movd %xmm0, %eax
-; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
-; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
-; X64-NEXT:    setl %al
+; X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; ~(0x204|0x3) = "~(inf|nan)"

>From f0ea19f3df7fb9b3a1a6359f0af4aadf9dbe1ae3 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 21 Aug 2024 15:42:24 +0400
Subject: [PATCH 4/4] Check for immediate legality

This avoids the x86_32 regressions, at the expense of
several other cases.
---
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  5 +-
 llvm/test/CodeGen/AArch64/isinf.ll            | 12 +--
 llvm/test/CodeGen/PowerPC/fp-classify.ll      | 28 +++----
 llvm/test/CodeGen/X86/is_fpclass.ll           | 78 +++++++++----------
 4 files changed, 63 insertions(+), 60 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 1e3a0da0f3be5b..aa022480947a7d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8739,7 +8739,10 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
         isCondCodeLegalOrCustom(IsOrderedInf ? OrderedCmpOpcode
                                              : UnorderedCmpOpcode,
                                 OperandVT.getScalarType().getSimpleVT()) &&
-        isOperationLegalOrCustom(ISD::FABS, OperandVT.getScalarType())) {
+        isOperationLegalOrCustom(ISD::FABS, OperandVT.getScalarType()) &&
+        (isOperationLegal(ISD::ConstantFP, OperandVT.getScalarType()) ||
+         (OperandVT.isVector() &&
+          isOperationLegalOrCustom(ISD::BUILD_VECTOR, OperandVT)))) {
       // isinf(x) --> fabs(x) == inf
       SDValue Abs = DAG.getNode(ISD::FABS, DL, OperandVT, Op);
       SDValue Inf =
diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll
index 834417b98743a8..e68539bcf07d9c 100644
--- a/llvm/test/CodeGen/AArch64/isinf.ll
+++ b/llvm/test/CodeGen/AArch64/isinf.ll
@@ -26,10 +26,10 @@ define i32 @replace_isinf_call_f16(half %x) {
 define i32 @replace_isinf_call_f32(float %x) {
 ; CHECK-LABEL: replace_isinf_call_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fabs s0, s0
+; CHECK-NEXT:    fmov w9, s0
 ; CHECK-NEXT:    mov w8, #2139095040 // =0x7f800000
-; CHECK-NEXT:    fmov s1, w8
-; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    and w9, w9, #0x7fffffff
+; CHECK-NEXT:    cmp w9, w8
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %abs = tail call float @llvm.fabs.f32(float %x)
@@ -42,10 +42,10 @@ define i32 @replace_isinf_call_f32(float %x) {
 define i32 @replace_isinf_call_f64(double %x) {
 ; CHECK-LABEL: replace_isinf_call_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fabs d0, d0
+; CHECK-NEXT:    fmov x9, d0
 ; CHECK-NEXT:    mov x8, #9218868437227405312 // =0x7ff0000000000000
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    and x9, x9, #0x7fffffffffffffff
+; CHECK-NEXT:    cmp x9, x8
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %abs = tail call double @llvm.fabs.f64(double %x)
diff --git a/llvm/test/CodeGen/PowerPC/fp-classify.ll b/llvm/test/CodeGen/PowerPC/fp-classify.ll
index f527b3c48040e7..dc9853ff2e3014 100644
--- a/llvm/test/CodeGen/PowerPC/fp-classify.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-classify.ll
@@ -7,13 +7,13 @@
 define zeroext i1 @abs_isinff(float %x) {
 ; P8-LABEL: abs_isinff:
 ; P8:       # %bb.0: # %entry
-; P8-NEXT:    addis 3, 2, .LCPI0_0 at toc@ha
-; P8-NEXT:    xsabsdp 0, 1
-; P8-NEXT:    li 4, 1
-; P8-NEXT:    lfs 1, .LCPI0_0 at toc@l(3)
-; P8-NEXT:    li 3, 0
-; P8-NEXT:    fcmpu 0, 0, 1
-; P8-NEXT:    iseleq 3, 4, 3
+; P8-NEXT:    xscvdpspn 0, 1
+; P8-NEXT:    lis 4, 32640
+; P8-NEXT:    mffprwz 3, 0
+; P8-NEXT:    clrlwi 3, 3, 1
+; P8-NEXT:    xor 3, 3, 4
+; P8-NEXT:    cntlzw 3, 3
+; P8-NEXT:    srwi 3, 3, 5
 ; P8-NEXT:    blr
 ;
 ; P9-LABEL: abs_isinff:
@@ -32,13 +32,13 @@ entry:
 define zeroext i1 @abs_isinf(double %x) {
 ; P8-LABEL: abs_isinf:
 ; P8:       # %bb.0: # %entry
-; P8-NEXT:    addis 3, 2, .LCPI1_0 at toc@ha
-; P8-NEXT:    xsabsdp 0, 1
-; P8-NEXT:    li 4, 1
-; P8-NEXT:    lfs 1, .LCPI1_0 at toc@l(3)
-; P8-NEXT:    li 3, 0
-; P8-NEXT:    fcmpu 0, 0, 1
-; P8-NEXT:    iseleq 3, 4, 3
+; P8-NEXT:    mffprd 3, 1
+; P8-NEXT:    li 4, 2047
+; P8-NEXT:    rldic 4, 4, 52, 1
+; P8-NEXT:    clrldi 3, 3, 1
+; P8-NEXT:    xor 3, 3, 4
+; P8-NEXT:    cntlzd 3, 3
+; P8-NEXT:    rldicl 3, 3, 58, 63
 ; P8-NEXT:    blr
 ;
 ; P9-LABEL: abs_isinf:
diff --git a/llvm/test/CodeGen/X86/is_fpclass.ll b/llvm/test/CodeGen/X86/is_fpclass.ll
index 4e46abbdb01c34..cc4d4c4543a515 100644
--- a/llvm/test/CodeGen/X86/is_fpclass.ll
+++ b/llvm/test/CodeGen/X86/is_fpclass.ll
@@ -240,22 +240,18 @@ entry:
 define i1 @isfinite_f(float %x) {
 ; X86-LABEL: isfinite_f:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    flds {{[0-9]+}}(%esp)
-; X86-NEXT:    fabs
-; X86-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
-; X86-NEXT:    fxch %st(1)
-; X86-NEXT:    fucompp
-; X86-NEXT:    fnstsw %ax
-; X86-NEXT:    # kill: def $ah killed $ah killed $ax
-; X86-NEXT:    sahf
-; X86-NEXT:    setne %al
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT:    setl %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: isfinite_f:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    setne %al
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    setl %al
 ; X64-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
@@ -1154,23 +1150,31 @@ entry:
 define i1 @isfinite_d(double %x) {
 ; X86-LABEL: isfinite_d:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    fldl {{[0-9]+}}(%esp)
-; X86-NEXT:    fabs
-; X86-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
-; X86-NEXT:    fxch %st(1)
-; X86-NEXT:    fucompp
-; X86-NEXT:    fnstsw %ax
-; X86-NEXT:    # kill: def $ah killed $ah killed $ax
-; X86-NEXT:    sahf
-; X86-NEXT:    setne %al
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $2146435072, %eax # imm = 0x7FF00000
+; X86-NEXT:    setl %al
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: isfinite_d:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    andpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    setne %al
-; X64-NEXT:    retq
+; X64-GENERIC-LABEL: isfinite_d:
+; X64-GENERIC:       # %bb.0: # %entry
+; X64-GENERIC-NEXT:    movq %xmm0, %rax
+; X64-GENERIC-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; X64-GENERIC-NEXT:    andq %rax, %rcx
+; X64-GENERIC-NEXT:    movabsq $9218868437227405312, %rax # imm = 0x7FF0000000000000
+; X64-GENERIC-NEXT:    cmpq %rax, %rcx
+; X64-GENERIC-NEXT:    setl %al
+; X64-GENERIC-NEXT:    retq
+;
+; X64-NDD-LABEL: isfinite_d:
+; X64-NDD:       # %bb.0: # %entry
+; X64-NDD-NEXT:    movq %xmm0, %rax
+; X64-NDD-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; X64-NDD-NEXT:    andq %rcx, %rax
+; X64-NDD-NEXT:    movabsq $9218868437227405312, %rcx # imm = 0x7FF0000000000000
+; X64-NDD-NEXT:    cmpq %rcx, %rax
+; X64-NDD-NEXT:    setl %al
+; X64-NDD-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 504)  ; 0x1f8 = "finite"
   ret i1 %0
@@ -2049,22 +2053,18 @@ entry:
 define i1 @not_isinf_or_nan_f(float %x) {
 ; X86-LABEL: not_isinf_or_nan_f:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    flds {{[0-9]+}}(%esp)
-; X86-NEXT:    fabs
-; X86-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
-; X86-NEXT:    fxch %st(1)
-; X86-NEXT:    fucompp
-; X86-NEXT:    fnstsw %ax
-; X86-NEXT:    # kill: def $ah killed $ah killed $ax
-; X86-NEXT:    sahf
-; X86-NEXT:    setne %al
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-NEXT:    setl %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: not_isinf_or_nan_f:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    setne %al
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-NEXT:    setl %al
 ; X64-NEXT:    retq
 entry:
   %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; ~(0x204|0x3) = "~(inf|nan)"



More information about the llvm-commits mailing list