[PATCH][X86] Improve the lowering of ISD::BITCAST from MVT::f64 to MVT::v4i16 / MVT::v8i8.
Andrea Di Biagio
andrea.dibiagio at gmail.com
Thu May 8 09:00:49 PDT 2014
Hi,
This patch teaches the x86 backend how to efficiently lower
ISD::BITCAST dag nodes from MVT::f64 to MVT::v4i16 (and vice versa)
and from MVT::f64 to MVT::v8i8 (and vice versa).
This improves the patch committed at revision 208107
(http://llvm.org/viewvc/llvm-project?view=revision&revision=208107 ).
Revision 208107 taught the backend how to efficiently lower a bitcast
dag node from f64 to v2i32 without introducing a redundant
store+reload sequence to bitconvert the f64 into an i64.
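For reference, the r208107-style expansion keeps the value in an XMM
register instead of bouncing it through a stack slot. A minimal sketch of
that expansion (the helper name is hypothetical; the DAG calls are the ones
already present in X86ISelLowering.cpp):

// Minimal sketch of the r208107-style expansion (helper name is
// hypothetical; the ISD nodes match the ones used in X86ISelLowering.cpp).
// The f64 is moved into a vector register and reinterpreted as i32 lanes,
// so no stack store+reload is needed.
static SDValue ExpandF64BitcastToV2I32(SDValue Op0, SDLoc dl,
                                       SelectionDAG &DAG) {
  // Put the f64 into the low element of a v2f64 register.
  SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Op0);
  // Reinterpret the same 128-bit register as four i32 lanes.
  SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Vec);
  // The original 64 bits of the f64 live in lanes 0 and 1.
  SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, ToV4I32,
                             DAG.getIntPtrConstant(0));
  SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, ToV4I32,
                             DAG.getIntPtrConstant(1));
  SDValue Elts[] = { Elt0, Elt1 };
  return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts);
}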
This patch extends the logic from revision 208107 to also handle
MVT::v4i16 and MVT::v8i8. It also correctly propagates Undef values
when widening a vector (for example, when widening from v2i32 to
v4i32, the upper 64 bits of the resulting vector are 'undef').
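The widening step is where this differs from the old code: only the
original lanes are extracted, and the extra lanes are filled with explicit
undef values rather than with copies of element 0. A minimal sketch of that
step (the helper is hypothetical; the DAG calls mirror the custom
bitcast-lowering hunk in this patch):

// Minimal sketch of the widening with explicit undef lanes (the helper is
// hypothetical; the calls mirror the bitcast-lowering hunk below).
// Example: v2i32 -> v4i32, with lanes 2 and 3 marked as undef.
static SDValue WidenWithUndef(SDValue InVec, SDLoc dl, SelectionDAG &DAG) {
  MVT SrcVT = InVec.getSimpleValueType();
  unsigned NumElts = SrcVT.getVectorNumElements();
  EVT SVT = SrcVT.getVectorElementType();

  // Copy the original elements into the low lanes of the wider vector.
  SmallVector<SDValue, 16> Elts;
  for (unsigned i = 0; i != NumElts; ++i)
    Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
                               DAG.getIntPtrConstant(i)));

  // Fill the upper lanes with undef instead of reusing element 0.
  SDValue Undef = DAG.getUNDEF(SVT);
  for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
    Elts.push_back(Undef);

  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
  return DAG.getNode(ISD::BUILD_VECTOR, dl, WideVT, Elts);
}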
I had to modify test ret-mmx.ll because the patch now correctly
propagates undef values in the resulting vector when widening from
v2i32 to v4i32.
As a result, function @t4 in 'test/CodeGen/X86/ret-mmx.ll' now
produces the sequence:
movl $1, %eax
movd %eax, %xmm0
rather than:
movsd .LCPI3_0(%rip), %xmm0
with the constant pool entry:
.LCPI3_0:
.long 1
.long 0
.long 1
.long 1
For consistency, I moved all the test cases from
'lower-bitcast-v2i32.ll' into a new test file called
'CodeGen/X86/lower-bitcast.ll'. The new file also adds extra test
cases to verify that we don't emit a redundant stack store+reload of
f64 values.
Please let me know if this is OK to submit.
Thanks,
Andrea Di Biagio
-------------- next part --------------
Index: test/CodeGen/X86/lower-bitcast.ll
===================================================================
--- test/CodeGen/X86/lower-bitcast.ll (revision 0)
+++ test/CodeGen/X86/lower-bitcast.ll (working copy)
@@ -0,0 +1,155 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+
+
+define double @test1(double %A) {
+ %1 = bitcast double %A to <2 x i32>
+ %add = add <2 x i32> %1, <i32 3, i32 5>
+ %2 = bitcast <2 x i32> %add to double
+ ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test1 into a
+; single paddd instruction. At the moment we produce the sequence
+; pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test1
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define double @test2(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %add = add <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %add to double
+ ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test2 into a
+; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence pshufd+pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test2
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define i64 @test3(i64 %A) {
+ %1 = bitcast i64 %A to <2 x float>
+ %add = fadd <2 x float> %1, <float 3.0, float 5.0>
+ %2 = bitcast <2 x float> %add to i64
+ ret i64 %2
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: pshufd
+; CHECK: addps
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+define i64 @test4(i64 %A) {
+ %1 = bitcast i64 %A to <2 x i32>
+ %add = add <2 x i32> %1, <i32 3, i32 5>
+ %2 = bitcast <2 x i32> %add to i64
+ ret i64 %2
+}
+; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
+; Ideally, we should fold that sequence into a single paddd.
+
+; CHECK-LABEL: test4
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK: ret
+
+
+define double @test5(double %A) {
+ %1 = bitcast double %A to <2 x float>
+ %add = fadd <2 x float> %1, <float 3.0, float 5.0>
+ %2 = bitcast <2 x float> %add to double
+ ret double %2
+}
+; CHECK-LABEL: test5
+; CHECK: addps
+; CHECK-NEXT: ret
+
+
+define double @test6(double %A) {
+ %1 = bitcast double %A to <4 x i16>
+ %add = add <4 x i16> %1, <i16 3, i16 4, i16 5, i16 6>
+ %2 = bitcast <4 x i16> %add to double
+ ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test6 into a
+; single paddw instruction.
+
+; CHECK-LABEL: test6
+; CHECK-NOT: movsd
+; CHECK: punpcklwd
+; CHECK-NEXT: paddd
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test7(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %add = add <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %add to double
+ ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test7 into a
+; single 'paddw %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence punpcklwd+punpcklwd+paddd+pshufb.
+
+; CHECK-LABEL: test7
+; CHECK-NOT: movsd
+; CHECK: punpcklwd
+; CHECK-NEXT: punpcklwd
+; CHECK-NEXT: paddd
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test8(double %A) {
+ %1 = bitcast double %A to <8 x i8>
+ %add = add <8 x i8> %1, <i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10>
+ %2 = bitcast <8 x i8> %add to double
+ ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test8 into a
+; single paddb instruction. At the moment we produce the sequence
+; punpcklbw+paddw+pshufb.
+
+; CHECK-LABEL: test8
+; CHECK-NOT: movsd
+; CHECK: punpcklbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test9(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %add = add <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %add to double
+ ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test9 into a
+; single 'paddb %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence punpcklbw+punpcklbw+paddw+pshufb.
+
+; CHECK-LABEL: test9
+; CHECK-NOT: movsd
+; CHECK: punpcklbw
+; CHECK-NEXT: punpcklbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
Index: test/CodeGen/X86/lower-bitcast-v2i32.ll
===================================================================
--- test/CodeGen/X86/lower-bitcast-v2i32.ll (revision 208331)
+++ test/CodeGen/X86/lower-bitcast-v2i32.ll (working copy)
@@ -1,80 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
-
-
-define double @test1(double %A) {
- %1 = bitcast double %A to <2 x i32>
- %add = add <2 x i32> %1, <i32 3, i32 5>
- %2 = bitcast <2 x i32> %add to double
- ret double %2
-}
-; FIXME: Ideally we should be able to fold the entire body of @test1 into a
-; single paddd instruction. At the moment we produce the sequence
-; pshufd+paddq+pshufd.
-
-; CHECK-LABEL: test1
-; CHECK-NOT: movsd
-; CHECK: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: ret
-
-
-define double @test2(double %A, double %B) {
- %1 = bitcast double %A to <2 x i32>
- %2 = bitcast double %B to <2 x i32>
- %add = add <2 x i32> %1, %2
- %3 = bitcast <2 x i32> %add to double
- ret double %3
-}
-; FIXME: Ideally we should be able to fold the entire body of @test2 into a
-; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddq+pshufd.
-
-; CHECK-LABEL: test2
-; CHECK-NOT: movsd
-; CHECK: pshufd
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: ret
-
-
-define i64 @test3(i64 %A) {
- %1 = bitcast i64 %A to <2 x float>
- %add = fadd <2 x float> %1, <float 3.0, float 5.0>
- %2 = bitcast <2 x float> %add to i64
- ret i64 %2
-}
-; CHECK-LABEL: test3
-; CHECK-NOT: pshufd
-; CHECK: addps
-; CHECK-NOT: pshufd
-; CHECK: ret
-
-
-define i64 @test4(i64 %A) {
- %1 = bitcast i64 %A to <2 x i32>
- %add = add <2 x i32> %1, <i32 3, i32 5>
- %2 = bitcast <2 x i32> %add to i64
- ret i64 %2
-}
-; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
-; Ideally, we should fold that sequence into a single paddd.
-
-; CHECK-LABEL: test4
-; CHECK: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
-; CHECK: ret
-
-
-define double @test5(double %A) {
- %1 = bitcast double %A to <2 x float>
- %add = fadd <2 x float> %1, <float 3.0, float 5.0>
- %2 = bitcast <2 x float> %add to double
- ret double %2
-}
-; CHECK-LABEL: test5
-; CHECK: addps
-; CHECK-NEXT: ret
-
Index: test/CodeGen/X86/ret-mmx.ll
===================================================================
--- test/CodeGen/X86/ret-mmx.ll (revision 208331)
+++ test/CodeGen/X86/ret-mmx.ll (working copy)
@@ -33,8 +33,8 @@
define double @t4() nounwind {
ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
; CHECK-LABEL: t4:
-; CHECK-NOT: movl $1
+; CHECK: movl $1
; CHECK-NOT: pshufd
-; CHECK: movsd {{.*}}, %xmm0
+; CHECK: movd {{.*}}, %xmm0
}
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp (revision 208331)
+++ lib/Target/X86/X86ISelLowering.cpp (working copy)
@@ -1040,6 +1040,8 @@
setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
+ setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
+ setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
}
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -14170,19 +14172,31 @@
MVT SrcVT = Op.getOperand(0).getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
- if (SrcVT == MVT::v2i32) {
+ if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
if (DstVT != MVT::f64)
// This conversion needs to be expanded.
return SDValue();
+ SDValue InVec = Op->getOperand(0);
SDLoc dl(Op);
- SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- Op->getOperand(0), DAG.getIntPtrConstant(0));
- SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- Op->getOperand(0), DAG.getIntPtrConstant(1));
- SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0};
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts);
+ unsigned NumElts = SrcVT.getVectorNumElements();
+ EVT SVT = SrcVT.getVectorElementType();
+
+ // Widen the vector in input in the case of MVT::v2i32.
+ // Example: from MVT::v2i32 to MVT::v4i32.
+ SmallVector<SDValue, 16> Elts;
+ for (unsigned i = 0, e = NumElts; i != e; ++i)
+ Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
+ DAG.getIntPtrConstant(i)));
+
+ // Explicitly mark the extra elements as Undef.
+ SDValue Undef = DAG.getUNDEF(SVT);
+ for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
+ Elts.push_back(Undef);
+
+ EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
DAG.getIntPtrConstant(0));
@@ -14652,17 +14666,23 @@
EVT DstVT = N->getValueType(0);
EVT SrcVT = N->getOperand(0)->getValueType(0);
- if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) {
- SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
- MVT::v2f64, N->getOperand(0));
- SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
- SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- ToV4I32, DAG.getIntPtrConstant(0));
- SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- ToV4I32, DAG.getIntPtrConstant(1));
- SDValue Elts[] = {Elt0, Elt1};
- Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts));
- }
+ if (SrcVT != MVT::f64 ||
+ (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
+ return;
+
+ unsigned NumElts = DstVT.getVectorNumElements();
+ EVT SVT = DstVT.getVectorElementType();
+ EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
+ SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+ MVT::v2f64, N->getOperand(0));
+ SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
+
+ SmallVector<SDValue, 8> Elts;
+ for (unsigned i = 0, e = NumElts; i != e; ++i)
+ Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
+ ToVecInt, DAG.getIntPtrConstant(i)));
+
+ Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
}
}
}