[PATCH] DAGCombiner: Pass the correct type to TargetLowering::isF(Abs|Neg)Free

Tom Stellard tom at stellard.net
Tue Jul 16 16:25:59 PDT 2013


From: Tom Stellard <thomas.stellard at amd.com>

The DAGCombiner was querying isFNegFree()/isFAbsFree() with the integer result
type of the bitcast instead of the floating-point type of the fneg/fabs
operand. This commit fixes the call sites, adds asserts documenting that these
hooks only make sense for floating-point types, implements them for R600, and
removes a test case that was relying on the buggy behavior.
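
For context, the bitwise forms this combine produces are the standard IEEE-754
single-precision sign-bit manipulations: clear the sign bit for fabs, flip it
for fneg. A minimal standalone C++ sketch of that bit-level behavior, purely
illustrative and not part of the patch:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // fabs via bit ops: clear the sign bit of an IEEE-754 single.
  static float fabs_bits(float x) {
    std::uint32_t i;
    std::memcpy(&i, &x, sizeof i);   // bitcast float -> i32
    i &= 0x7FFFFFFFu;                // and with ~signbit
    std::memcpy(&x, &i, sizeof i);   // bitcast back
    return x;
  }

  // fneg via bit ops: flip the sign bit.
  static float fneg_bits(float x) {
    std::uint32_t i;
    std::memcpy(&i, &x, sizeof i);
    i ^= 0x80000000u;                // xor with signbit
    std::memcpy(&x, &i, sizeof i);
    return x;
  }

  int main() {
    // Prints "2.500000 -1.250000"
    std::printf("%f %f\n", fabs_bits(-2.5f), fneg_bits(1.25f));
  }

On targets whose isFAbsFree()/isFNegFree() hooks return true for f32 (as R600
does after this patch), the combine is skipped and the hardware's free source
modifiers are used instead.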
---
 include/llvm/Target/TargetLowering.h     |   6 +-
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp |   4 +-
 lib/Target/R600/AMDGPUISelLowering.cpp   |  14 +++
 lib/Target/R600/AMDGPUISelLowering.h     |   3 +
 test/CodeGen/R600/fabs.ll                |  28 ++++--
 test/CodeGen/R600/fneg.ll                |  16 +++
 test/CodeGen/R600/literals.ll            | 166 -------------------------------
 7 files changed, 56 insertions(+), 181 deletions(-)
 create mode 100644 test/CodeGen/R600/fneg.ll

diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 09a93f9..2a383e4 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -1174,13 +1174,15 @@ public:
 
   /// Return true if an fneg operation is free to the point where it is never
   /// worthwhile to replace it with a bitwise operation.
-  virtual bool isFNegFree(EVT) const {
+  virtual bool isFNegFree(EVT VT) const {
+    assert(VT.isFloatingPoint());
     return false;
   }
 
   /// Return true if an fabs operation is free to the point where it is never
   /// worthwhile to replace it with a bitwise operation.
-  virtual bool isFAbsFree(EVT) const {
+  virtual bool isFAbsFree(EVT VT) const {
+    assert(VT.isFloatingPoint());
     return false;
   }
 
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 111803f..ac4eeaf 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5680,8 +5680,8 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
   // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
   // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
   // This often reduces constant pool loads.
-  if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) ||
-       (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) &&
+  if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
+       (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
       N0.getNode()->hasOneUse() && VT.isInteger() &&
       !VT.isVector() && !N0.getValueType().isVector()) {
     SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index dcf0f56..3342a8d 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -103,6 +103,20 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
 }
 
 //===---------------------------------------------------------------------===//
+// Target Properties
+//===---------------------------------------------------------------------===//
+
+bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
+  assert(VT.isFloatingPoint());
+  return VT == MVT::f32;
+}
+
+bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
+  assert(VT.isFloatingPoint());
+  return VT == MVT::f32;
+}
+
+//===---------------------------------------------------------------------===//
 // TargetLowering Callbacks
 //===---------------------------------------------------------------------===//
 
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index 7f4468c..0e1c131 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -49,6 +49,9 @@ protected:
 public:
   AMDGPUTargetLowering(TargetMachine &TM);
 
+  virtual bool isFAbsFree(EVT VT) const;
+  virtual bool isFNegFree(EVT VT) const;
+
   virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
index 85f2882..78ffd57 100644
--- a/test/CodeGen/R600/fabs.ll
+++ b/test/CodeGen/R600/fabs.ll
@@ -1,16 +1,22 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
-;CHECK: MOV * T{{[0-9]+\.[XYZW], \|T[0-9]+\.[XYZW]\|}}
+; DAGCombiner will transform:
+; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
+; unless isFAbsFree() returns true
 
-define void @test() {
-   %r0 = call float @llvm.R600.load.input(i32 0)
-   %r1 = call float @fabs( float %r0)
-   call void @llvm.AMDGPU.store.output(float %r1, i32 0)
-   ret void
-}
-
-declare float @llvm.R600.load.input(i32) readnone
+; R600-CHECK: @fabs_free
+; R600-CHECK-NOT: AND
+; R600-CHECK: |PV.{{[XYZW]}}|
+; SI-CHECK: @fabs_free
+; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 1, 0, 0, 0
 
-declare void @llvm.AMDGPU.store.output(float, i32)
+define void @fabs_free(float addrspace(1)* %out, i32 %in) {
+entry:
+  %0 = bitcast i32 %in to float
+  %1 = call float @fabs(float %0)
+  store float %1, float addrspace(1)* %out
+  ret void
+}
 
 declare float @fabs(float ) readnone
diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
new file mode 100644
index 0000000..343e296
--- /dev/null
+++ b/test/CodeGen/R600/fneg.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; DAGCombiner will transform:
+; (fneg (f32 bitcast (i32 a))) => (f32 bitcast (xor (i32 a), 0x80000000))
+; unless the target returns true for isFNegFree()
+
+; CHECK-NOT: XOR
+; CHECK: -KC0[2].Z
+
+define void @fneg_free(float addrspace(1)* %out, i32 %in) {
+entry:
+  %0 = bitcast i32 %in to float
+  %1 = fsub float 0.0, %0
+  store float %1, float addrspace(1)* %out
+  ret void
+}
diff --git a/test/CodeGen/R600/literals.ll b/test/CodeGen/R600/literals.ll
index fbb77b3..77b168e 100644
--- a/test/CodeGen/R600/literals.ll
+++ b/test/CodeGen/R600/literals.ll
@@ -31,169 +31,3 @@ entry:
   store float %0, float addrspace(1)* %out
   ret void
 }
-
-; CHECK: @main
-; CHECK: -2147483648
-; CHECK-NEXT-NOT: -2147483648
-
-define void @main() #0 {
-main_body:
-  %0 = call float @llvm.R600.load.input(i32 4)
-  %1 = call float @llvm.R600.load.input(i32 5)
-  %2 = call float @llvm.R600.load.input(i32 6)
-  %3 = call float @llvm.R600.load.input(i32 7)
-  %4 = call float @llvm.R600.load.input(i32 8)
-  %5 = call float @llvm.R600.load.input(i32 9)
-  %6 = call float @llvm.R600.load.input(i32 10)
-  %7 = call float @llvm.R600.load.input(i32 11)
-  %8 = call float @llvm.R600.load.input(i32 12)
-  %9 = call float @llvm.R600.load.input(i32 13)
-  %10 = call float @llvm.R600.load.input(i32 14)
-  %11 = call float @llvm.R600.load.input(i32 15)
-  %12 = load <4 x float> addrspace(8)* null
-  %13 = extractelement <4 x float> %12, i32 0
-  %14 = fsub float -0.000000e+00, %13
-  %15 = fadd float %0, %14
-  %16 = load <4 x float> addrspace(8)* null
-  %17 = extractelement <4 x float> %16, i32 1
-  %18 = fsub float -0.000000e+00, %17
-  %19 = fadd float %1, %18
-  %20 = load <4 x float> addrspace(8)* null
-  %21 = extractelement <4 x float> %20, i32 2
-  %22 = fsub float -0.000000e+00, %21
-  %23 = fadd float %2, %22
-  %24 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
-  %25 = extractelement <4 x float> %24, i32 0
-  %26 = fmul float %25, %0
-  %27 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
-  %28 = extractelement <4 x float> %27, i32 1
-  %29 = fmul float %28, %0
-  %30 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
-  %31 = extractelement <4 x float> %30, i32 2
-  %32 = fmul float %31, %0
-  %33 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
-  %34 = extractelement <4 x float> %33, i32 3
-  %35 = fmul float %34, %0
-  %36 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
-  %37 = extractelement <4 x float> %36, i32 0
-  %38 = fmul float %37, %1
-  %39 = fadd float %38, %26
-  %40 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
-  %41 = extractelement <4 x float> %40, i32 1
-  %42 = fmul float %41, %1
-  %43 = fadd float %42, %29
-  %44 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
-  %45 = extractelement <4 x float> %44, i32 2
-  %46 = fmul float %45, %1
-  %47 = fadd float %46, %32
-  %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
-  %49 = extractelement <4 x float> %48, i32 3
-  %50 = fmul float %49, %1
-  %51 = fadd float %50, %35
-  %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
-  %53 = extractelement <4 x float> %52, i32 0
-  %54 = fmul float %53, %2
-  %55 = fadd float %54, %39
-  %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
-  %57 = extractelement <4 x float> %56, i32 1
-  %58 = fmul float %57, %2
-  %59 = fadd float %58, %43
-  %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
-  %61 = extractelement <4 x float> %60, i32 2
-  %62 = fmul float %61, %2
-  %63 = fadd float %62, %47
-  %64 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
-  %65 = extractelement <4 x float> %64, i32 3
-  %66 = fmul float %65, %2
-  %67 = fadd float %66, %51
-  %68 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
-  %69 = extractelement <4 x float> %68, i32 0
-  %70 = fmul float %69, %3
-  %71 = fadd float %70, %55
-  %72 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
-  %73 = extractelement <4 x float> %72, i32 1
-  %74 = fmul float %73, %3
-  %75 = fadd float %74, %59
-  %76 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
-  %77 = extractelement <4 x float> %76, i32 2
-  %78 = fmul float %77, %3
-  %79 = fadd float %78, %63
-  %80 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
-  %81 = extractelement <4 x float> %80, i32 3
-  %82 = fmul float %81, %3
-  %83 = fadd float %82, %67
-  %84 = insertelement <4 x float> undef, float %15, i32 0
-  %85 = insertelement <4 x float> %84, float %19, i32 1
-  %86 = insertelement <4 x float> %85, float %23, i32 2
-  %87 = insertelement <4 x float> %86, float 0.000000e+00, i32 3
-  %88 = insertelement <4 x float> undef, float %15, i32 0
-  %89 = insertelement <4 x float> %88, float %19, i32 1
-  %90 = insertelement <4 x float> %89, float %23, i32 2
-  %91 = insertelement <4 x float> %90, float 0.000000e+00, i32 3
-  %92 = call float @llvm.AMDGPU.dp4(<4 x float> %87, <4 x float> %91)
-  %93 = call float @fabs(float %92)
-  %94 = call float @llvm.AMDGPU.rsq(float %93)
-  %95 = fmul float %15, %94
-  %96 = fmul float %19, %94
-  %97 = fmul float %23, %94
-  %98 = insertelement <4 x float> undef, float %4, i32 0
-  %99 = insertelement <4 x float> %98, float %5, i32 1
-  %100 = insertelement <4 x float> %99, float %6, i32 2
-  %101 = insertelement <4 x float> %100, float 0.000000e+00, i32 3
-  %102 = insertelement <4 x float> undef, float %4, i32 0
-  %103 = insertelement <4 x float> %102, float %5, i32 1
-  %104 = insertelement <4 x float> %103, float %6, i32 2
-  %105 = insertelement <4 x float> %104, float 0.000000e+00, i32 3
-  %106 = call float @llvm.AMDGPU.dp4(<4 x float> %101, <4 x float> %105)
-  %107 = call float @fabs(float %106)
-  %108 = call float @llvm.AMDGPU.rsq(float %107)
-  %109 = fmul float %4, %108
-  %110 = fmul float %5, %108
-  %111 = fmul float %6, %108
-  %112 = insertelement <4 x float> undef, float %95, i32 0
-  %113 = insertelement <4 x float> %112, float %96, i32 1
-  %114 = insertelement <4 x float> %113, float %97, i32 2
-  %115 = insertelement <4 x float> %114, float 0.000000e+00, i32 3
-  %116 = insertelement <4 x float> undef, float %109, i32 0
-  %117 = insertelement <4 x float> %116, float %110, i32 1
-  %118 = insertelement <4 x float> %117, float %111, i32 2
-  %119 = insertelement <4 x float> %118, float 0.000000e+00, i32 3
-  %120 = call float @llvm.AMDGPU.dp4(<4 x float> %115, <4 x float> %119)
-  %121 = fsub float -0.000000e+00, %120
-  %122 = fcmp uge float 0.000000e+00, %121
-  %123 = select i1 %122, float 0.000000e+00, float %121
-  %124 = insertelement <4 x float> undef, float %8, i32 0
-  %125 = insertelement <4 x float> %124, float %9, i32 1
-  %126 = insertelement <4 x float> %125, float 5.000000e-01, i32 2
-  %127 = insertelement <4 x float> %126, float 1.000000e+00, i32 3
-  call void @llvm.R600.store.swizzle(<4 x float> %127, i32 60, i32 1)
-  %128 = insertelement <4 x float> undef, float %71, i32 0
-  %129 = insertelement <4 x float> %128, float %75, i32 1
-  %130 = insertelement <4 x float> %129, float %79, i32 2
-  %131 = insertelement <4 x float> %130, float %83, i32 3
-  call void @llvm.R600.store.swizzle(<4 x float> %131, i32 0, i32 2)
-  %132 = insertelement <4 x float> undef, float %123, i32 0
-  %133 = insertelement <4 x float> %132, float %96, i32 1
-  %134 = insertelement <4 x float> %133, float %97, i32 2
-  %135 = insertelement <4 x float> %134, float 0.000000e+00, i32 3
-  call void @llvm.R600.store.swizzle(<4 x float> %135, i32 1, i32 2)
-  ret void
-}
-
-; Function Attrs: readnone
-declare float @llvm.R600.load.input(i32) #1
-
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
-
-; Function Attrs: readonly
-declare float @fabs(float) #2
-
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #1
-
-declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
-
-attributes #0 = { "ShaderType"="1" }
-attributes #1 = { readnone }
-attributes #2 = { readonly }
-- 
1.7.11.4