[llvm] 606a62c - AMDGPU: Force sign operand of f64 fcopysign to f32

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 5 15:54:19 PST 2023


Author: Matt Arsenault
Date: 2023-03-05T19:54:13-04:00
New Revision: 606a62ce27e602cfc12381dd9d1ec2d065aa075e

URL: https://github.com/llvm/llvm-project/commit/606a62ce27e602cfc12381dd9d1ec2d065aa075e
DIFF: https://github.com/llvm/llvm-project/commit/606a62ce27e602cfc12381dd9d1ec2d065aa075e.diff

LOG: AMDGPU: Force sign operand of f64 fcopysign to f32

The fcopysign DAG operation, unlike the IR one, allows
different types for the sign and magnitude. We can reduce
the bitwidth of the high operand since only the sign bit matters.

The default combine only introduces mixed fcopysign
operand types from fpext/fptrunc. We effectively do this
already during selection, but doing it earlier in the combiner
should expose new combine opportunities (e.g. the existing tests
now eliminate the load of the low half of the double). Unfortunately
this isn't enough to handle the case I'm interested in just yet.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.h
    llvm/test/CodeGen/AMDGPU/fnearbyint.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1f623dc5e217..d23ef959d93c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -769,7 +769,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
                        ISD::ZERO_EXTEND,
                        ISD::SIGN_EXTEND_INREG,
                        ISD::EXTRACT_VECTOR_ELT,
-                       ISD::INSERT_VECTOR_ELT});
+                       ISD::INSERT_VECTOR_ELT,
+                       ISD::FCOPYSIGN});
 
   // All memory operations. Some folding on the pointer operand is done to help
   // matching the constant offsets in the addressing modes.
@@ -9465,6 +9466,29 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
   return SDValue();
 }
 
+SDValue SITargetLowering::performFCopySignCombine(SDNode *N,
+                                                  DAGCombinerInfo &DCI) const {
+  SDValue SignOp = N->getOperand(1);
+  if (SignOp.getValueType() != MVT::f64)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc DL(N);
+
+  // Reduce width of sign operand, we only need the highest bit.
+  //
+  // fcopysign f64:x, f64:y ->
+  //   fcopysign f64:x, (extract_vector_elt (bitcast f64:y to v2f32), 1)
+  // TODO: In some cases it might make sense to go all the way to f16.
+  SDValue SignAsVector = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, SignOp);
+  SDValue SignAsF32 =
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, SignAsVector,
+                  DAG.getConstant(1, DL, MVT::i32));
+
+  return DAG.getNode(ISD::FCOPYSIGN, DL, N->getValueType(0), N->getOperand(0),
+                     SignAsF32);
+}
+
 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
 
 // This is a variant of
@@ -11705,6 +11729,8 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
     return performUCharToFloatCombine(N, DCI);
+  case ISD::FCOPYSIGN:
+    return performFCopySignCombine(N, DCI);
   case AMDGPUISD::CVT_F32_UBYTE0:
   case AMDGPUISD::CVT_F32_UBYTE1:
   case AMDGPUISD::CVT_F32_UBYTE2:

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 74985c6c625e..d9fde1c6adce 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -167,6 +167,8 @@ class SITargetLowering final : public AMDGPUTargetLowering {
 
   SDValue performUCharToFloatCombine(SDNode *N,
                                      DAGCombinerInfo &DCI) const;
+  SDValue performFCopySignCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
   SDValue performSHLPtrCombine(SDNode *N,
                                unsigned AS,
                                EVT MemVT,

diff --git a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
index beff3dd27571..48f6c26c8d63 100644
--- a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
@@ -159,9 +159,8 @@ define amdgpu_kernel void @nearbyint_f64(ptr addrspace(1) %out, double %in) {
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_mov_b32_e32 v4, s3
-; SI-NEXT:    v_bfi_b32 v1, s8, v1, v4
 ; SI-NEXT:    v_mov_b32_e32 v6, s3
+; SI-NEXT:    v_bfi_b32 v1, s8, v1, v6
 ; SI-NEXT:    v_mov_b32_e32 v7, s2
 ; SI-NEXT:    v_add_f64 v[4:5], s[2:3], v[0:1]
 ; SI-NEXT:    v_add_f64 v[0:1], v[4:5], -v[0:1]
@@ -210,13 +209,11 @@ define amdgpu_kernel void @nearbyint_v2f64(ptr addrspace(1) %out, <2 x double> %
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_mov_b32_e32 v5, s9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_bfi_b32 v1, s10, v6, v1
 ; SI-NEXT:    v_mov_b32_e32 v7, s3
+; SI-NEXT:    v_bfi_b32 v1, s10, v6, v7
 ; SI-NEXT:    v_mov_b32_e32 v8, s2
 ; SI-NEXT:    v_mov_b32_e32 v9, s1
-; SI-NEXT:    v_mov_b32_e32 v10, s1
-; SI-NEXT:    v_mov_b32_e32 v11, s0
+; SI-NEXT:    v_mov_b32_e32 v10, s0
 ; SI-NEXT:    v_add_f64 v[2:3], s[2:3], v[0:1]
 ; SI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[0:1]
 ; SI-NEXT:    v_bfi_b32 v1, s10, v6, v9
@@ -226,8 +223,8 @@ define amdgpu_kernel void @nearbyint_v2f64(ptr addrspace(1) %out, <2 x double> %
 ; SI-NEXT:    v_add_f64 v[6:7], s[0:1], v[0:1]
 ; SI-NEXT:    v_add_f64 v[0:1], v[6:7], -v[0:1]
 ; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[0:1]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v10, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v11, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -275,22 +272,18 @@ define amdgpu_kernel void @nearbyint_v4f64(ptr addrspace(1) %out, <4 x double> %
 ; SI-NEXT:    v_mov_b32_e32 v8, s12
 ; SI-NEXT:    v_mov_b32_e32 v9, s13
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s3
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v0
 ; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v2
 ; SI-NEXT:    v_mov_b32_e32 v6, s2
-; SI-NEXT:    v_mov_b32_e32 v3, s1
 ; SI-NEXT:    v_mov_b32_e32 v7, s1
 ; SI-NEXT:    v_mov_b32_e32 v11, s0
 ; SI-NEXT:    v_mov_b32_e32 v12, s7
-; SI-NEXT:    v_mov_b32_e32 v13, s7
-; SI-NEXT:    v_mov_b32_e32 v14, s6
-; SI-NEXT:    v_mov_b32_e32 v15, s5
-; SI-NEXT:    v_mov_b32_e32 v16, s5
-; SI-NEXT:    v_mov_b32_e32 v17, s4
+; SI-NEXT:    v_mov_b32_e32 v13, s6
+; SI-NEXT:    v_mov_b32_e32 v14, s5
+; SI-NEXT:    v_mov_b32_e32 v15, s4
 ; SI-NEXT:    v_add_f64 v[0:1], s[2:3], v[4:5]
 ; SI-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v3
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v7
 ; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[8:9]
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v1, v2, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v6, vcc
@@ -302,15 +295,15 @@ define amdgpu_kernel void @nearbyint_v4f64(ptr addrspace(1) %out, <4 x double> %
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v11, vcc
 ; SI-NEXT:    v_add_f64 v[6:7], s[6:7], v[4:5]
 ; SI-NEXT:    v_add_f64 v[6:7], v[6:7], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v15
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v14
 ; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v13, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v6, v14, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v12, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v6, v6, v13, vcc
 ; SI-NEXT:    v_add_f64 v[10:11], s[4:5], v[4:5]
 ; SI-NEXT:    v_add_f64 v[4:5], v[10:11], -v[4:5]
 ; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v16, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v17, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v15, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
 ; SI-NEXT:    s_endpgm


        


More information about the llvm-commits mailing list