[PATCH][X86] Improve the lowering of BITCAST dag nodes from type f64 to type v2i32 (and vice versa).

Andrea Di Biagio andrea.dibiagio at gmail.com
Tue May 6 07:59:32 PDT 2014


Hi,

The goal of this patch is to simplify the lowering of bitconverts from
type MVT::f64 to type MVT::v2i32 (and vice versa).

When legalizing an ISD::BITCAST dag node from MVT::f64 to MVT::v2i32,
we now produce a cheaper SCALAR_TO_VECTOR (to a vector of type v2f64)
followed by a 'free' bitcast to v4i32. The elements of the resulting
v4i32 are then extracted to build the resulting v2i32 vector. This is
cheaper than introducing a store+load sequence to convert the input
operand from type f64 to type i64.
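
In SelectionDAG terms, the new expansion amounts to the following node
sequence (a minimal sketch that mirrors the ReplaceNodeResults change
in the attached patch; 'DAG', 'dl' and the f64 operand 'Src' are
assumed to be in scope):

  // Widen the f64 to a v2f64 and reinterpret the bits as v4i32; both
  // steps stay in XMM registers, so no stack traffic is needed.
  SDValue Vec   = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Src);
  SDValue V4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Vec);
  // The two low i32 lanes now hold the original f64 bits; extract them
  // and rebuild the (illegal, soon to be promoted) v2i32 result.
  SDValue Elt0  = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, V4I32,
                              DAG.getIntPtrConstant(0));
  SDValue Elt1  = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, V4I32,
                              DAG.getIntPtrConstant(1));
  SDValue Elts[] = { Elt0, Elt1 };
  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts);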

Without this patch, during type legalization the f64 operand of an
ISD::BITCAST dag node that performs a bitconvert from MVT::f64 to
MVT::v2i32 is first converted into an i64. The resulting i64 is then
used to build a vector of type v2i64.
The backend introduces the new v2i64 vector because value type
MVT::v2i32 is illegal and requires promotion to the next legal vector
type with the same number of elements (in this case, MVT::v2i64).
The conversion from f64 to i64 is done by storing the value to a stack
location and then loading it back from that same location as an i64.
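
For reference, that store+load round trip is roughly equivalent to the
following (a simplified sketch only, not the exact legalizer code;
'DAG', 'dl' and the f64 value 'Src' are assumed to be in scope):

  // Spill the f64 to a stack temporary, then reload the same bits as i64.
  SDValue Slot  = DAG.CreateStackTemporary(MVT::f64);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Src, Slot,
                               MachinePointerInfo(), false, false, 0);
  SDValue AsI64 = DAG.getLoad(MVT::i64, dl, Store, Slot,
                              MachinePointerInfo(), false, false, false, 0);

This is the movsd/movq pair visible in the 'before' code below.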

This patch is beneficial, for example, in the following case:

define double @test(double %A) {
  %1 = bitcast double %A to <2 x i32>
  %add = add <2 x i32> %1, <i32 3, i32 5>
  %2 = bitcast <2 x i32> %add to double
  ret double %2
}

Before this patch, we produced:
   movsd %xmm0, -8(%rsp)
   movq -8(%rsp), %xmm0
   pshufd $16, %xmm0, %xmm0
   paddq .LCPI0_0(%rip), %xmm0
   pshufd $8, %xmm0, %xmm0
   movq %xmm0, -16(%rsp)
   movsd -16(%rsp), %xmm0
   retq

With this patch, we produce a much cleaner sequence:
   pshufd $16, %xmm0, %xmm0
   paddq .LCPI0_0(%rip), %xmm0
   pshufd $8, %xmm0, %xmm0


Function @t4 from test 'ret-mmx.ll' is another example of a function
that is significantly simplified by this transformation. Before, we
produced a long sequence of 8 instructions for @t4; now the entire
function is optimized into a single 'movsd' instruction.

Going back to function @test from the example above, with this patch
we now produce the sequence pshufd+paddq+pshufd. Ideally, we should be
able to fold that entire sequence into a single paddd.

A follow-up patch will improve the dag combiner so that it can spot
sequences of shuffle+binop+shuffle which can be safely folded into a
single binop.

Please let me know if it is OK to submit.

Thanks,
Andrea Di Biagio
SN Systems - Sony Computer Entertainment Group.
-------------- next part --------------
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp	(revision 208091)
+++ lib/Target/X86/X86ISelLowering.cpp	(working copy)
@@ -1037,6 +1037,8 @@
     setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
 
     setLoadExtAction(ISD::EXTLOAD,              MVT::v2f32, Legal);
+
+    setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
   }
 
   if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -14078,6 +14080,25 @@
                             SelectionDAG &DAG) {
   MVT SrcVT = Op.getOperand(0).getSimpleValueType();
   MVT DstVT = Op.getSimpleValueType();
+
+  if (SrcVT == MVT::v2i32) {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    if (DstVT != MVT::f64)
+      // This conversion needs to be expanded.
+      return SDValue();
+
+    SDLoc dl(Op);
+    SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(0));
+    SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(1));
+    SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0};
+    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts);
+    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
+                       DAG.getIntPtrConstant(0));
+  }
+
   assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
          Subtarget->hasMMX() && "Unexpected custom BITCAST");
   assert((DstVT == MVT::i64 ||
@@ -14533,9 +14554,28 @@
     ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc);
     return;
   }
-  case ISD::ATOMIC_LOAD:
+  case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
+    return;
   }
+  case ISD::BITCAST: {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    EVT DstVT = N->getValueType(0);
+    EVT SrcVT = N->getOperand(0)->getValueType(0);
+
+    if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) {
+      SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+                                     MVT::v2f64, N->getOperand(0));
+      SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
+      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(0));
+      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(1));
+      SDValue Elts[] = {Elt0, Elt1};
+      Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts));
+    }
+  }
+  }
 }
 
 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
Index: test/CodeGen/X86/lower-bitcast-v2i32.ll
===================================================================
--- test/CodeGen/X86/lower-bitcast-v2i32.ll	(revision 0)
+++ test/CodeGen/X86/lower-bitcast-v2i32.ll	(working copy)
@@ -0,0 +1,81 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+
+
+define double @test1(double %A) {
+  %1 = bitcast double %A to <2 x i32>
+  %add = add <2 x i32> %1, <i32 3, i32 5>
+  %2 = bitcast <2 x i32> %add to double
+  ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test1 into a
+; single paddd instruction. At the moment we produce the sequence 
+; pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test1
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define double @test2(double %A, double %B) {
+  %1 = bitcast double %A to <2 x i32>
+  %2 = bitcast double %B to <2 x i32>
+  %add = add <2 x i32> %1, %2
+  %3 = bitcast <2 x i32> %add to double
+  ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test2 into a
+; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence pshufd+pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test2
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define i64 @test3(i64 %A) {
+  %1 = bitcast i64 %A to <2 x float>
+  %add = fadd <2 x float> %1, <float 3.0, float 5.0>
+  %2 = bitcast <2 x float> %add to i64
+  ret i64 %2
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: pshufd
+; CHECK: addps
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+define i64 @test4(i64 %A) {
+  %1 = bitcast i64 %A to <2 x i32>
+  %add = add <2 x i32> %1, <i32 3, i32 5>
+  %2 = bitcast <2 x i32> %add to i64
+  ret i64 %2
+}
+; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
+; Ideally, we should fold that sequence into a single paddd.
+
+; CHECK-LABEL: test4
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK: ret
+
+
+define double @test5(double %A) {
+  %1 = bitcast double %A to <2 x float>
+  %add = fadd <2 x float> %1, <float 3.0, float 5.0>
+  %2 = bitcast <2 x float> %add to double
+  ret double %2
+}
+; CHECK-LABEL: test5
+; CHECK: addps
+; CHECK-NEXT: ret
+
+
Index: test/CodeGen/X86/ret-mmx.ll
===================================================================
--- test/CodeGen/X86/ret-mmx.ll	(revision 208091)
+++ test/CodeGen/X86/ret-mmx.ll	(working copy)
@@ -33,7 +33,8 @@
 define double @t4() nounwind {
 	ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
 ; CHECK-LABEL: t4:
-; CHECK: movl $1
-; CHECK: movd {{.*}}, %xmm0
+; CHECK-NOT: movl $1
+; CHECK-NOT: pshufd
+; CHECK: movsd {{.*}}, %xmm0
 }
 

