[llvm] r205146 - [PowerPC] Handle VSX v2i64 SIGN_EXTEND_INREG
Hal Finkel
hfinkel at anl.gov
Sun Mar 30 06:22:59 PDT 2014
Author: hfinkel
Date: Sun Mar 30 08:22:59 2014
New Revision: 205146
URL: http://llvm.org/viewvc/llvm-project?rev=205146&view=rev
Log:
[PowerPC] Handle VSX v2i64 SIGN_EXTEND_INREG
sitofp from v2i32 to v2f64 ends up generating a SIGN_EXTEND_INREG v2i64 node
(and similarly for v2i16 and v2i8). Even though there are no sign-extension (or
algebraic shift) instructions for v2i64 types, we can handle v2i32 sign
extensions by converting to and from v2i64. The small trick necessary here is
to shift the i32 elements into the right lanes before the i32 -> f64 step:
because of the big-Endian nature of the system, we need the i32 portion in the
high word of the i64 elements.
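As an illustration only (not part of the commit), the word rotation can be
modeled in plain C++; the register layout and word numbering follow the
comment this patch adds to PPCInstrVSX.td, and the values are made up:

// Standalone sketch (not backend code): model a 128-bit VSX register as four
// 32-bit words in big-Endian order and show why the new patterns rotate by
// one word (xxsldwi $C, $C, 1) before converting with xvcvsxwdp.
#include <cstdint>
#include <cstdio>

int main() {
  // Before the shift:  | undef | i32 a | undef | i32 b |
  uint32_t reg[4] = {0xdeadbeefu, 7u, 0xdeadbeefu, 0xfffffff7u /* -9 */};

  // xxsldwi with both operands equal and a shift count of 1 acts as a
  // rotate left by one word:  | i32 a | undef | i32 b | undef |
  uint32_t rot[4];
  for (int i = 0; i < 4; ++i)
    rot[i] = reg[(i + 1) % 4];

  // xvcvsxwdp reads the first word of each doubleword (words 0 and 2) and
  // converts it to f64, which is exactly where the rotation put the data.
  double d0 = (double)(int32_t)rot[0];
  double d1 = (double)(int32_t)rot[2];
  printf("%f %f\n", d0, d1); // 7.000000 -9.000000
  return 0;
}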
For v2i16 and v2i8 we can do the same, but we first use the default Altivec
shift-based expansion from v2i16 or v2i8 to v2i32 (by casting to v4i32) and
then apply the above procedure.
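That expansion amounts to a per-word shl/ashr pair (vslw/vsraw) before the
v2i32 procedure above. A rough scalar model of that step, assuming (as
sign_extend_inreg implies) that the i16/i8 payload sits in the low bits of
each 32-bit word; the shift amounts match the vspltisw/vadduwm sequences
(8 + 8 = 16, 12 + 12 = 24) checked by the new tests below:

// Standalone sketch (not backend code) of the default shift-based expansion.
#include <cstdint>
#include <cstdio>

static int32_t sext_in_word(uint32_t word, unsigned payload_bits) {
  unsigned sh = 32u - payload_bits;      // 16 for i16, 24 for i8
  return (int32_t)(word << sh) >> sh;    // vslw then vsraw, applied per lane
}

int main() {
  printf("%d\n", sext_in_word(0x0000fff7u, 16)); // -9 from an i16 payload
  printf("%d\n", sext_in_word(0x000000f7u, 8));  // -9 from an i8 payload
  return 0;
}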
Modified:
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
llvm/trunk/test/CodeGen/PowerPC/vsx.ll
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=205146&r1=205145&r2=205146&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Sun Mar 30 08:22:59 2014
@@ -600,6 +600,13 @@ PPCTargetLowering::PPCTargetLowering(PPC
setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
+ // Vector operation legalization checks the result type of
+ // SIGN_EXTEND_INREG, overall legalization checks the inner type.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
+
addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
}
}
@@ -5947,6 +5954,30 @@ SDValue PPCTargetLowering::LowerINTRINSI
return Flags;
}
+SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
+ // instructions), but for smaller types, we need to first extend up to v2i32
+ // before going any further.
+ if (Op.getValueType() == MVT::v2i64) {
+ EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ if (ExtVT != MVT::v2i32) {
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
+ Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
+ DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
+ ExtVT.getVectorElementType(), 4)));
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
+ Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
+ DAG.getValueType(MVT::v2i32));
+ }
+
+ return Op;
+ }
+
+ return SDValue();
+}
+
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
@@ -6074,6 +6105,7 @@ SDValue PPCTargetLowering::LowerOperatio
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
+ case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
// For counter-based loop handling.
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h?rev=205146&r1=205145&r2=205146&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h Sun Mar 30 08:22:59 2014
@@ -535,6 +535,7 @@ namespace llvm {
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
Modified: llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td?rev=205146&r1=205145&r2=205146&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrVSX.td Sun Mar 30 08:22:59 2014
@@ -791,6 +791,18 @@ def : Pat<(v2f64 (bitconvert v2i64:$A)),
def : Pat<(v2i64 (bitconvert v2f64:$A)),
(COPY_TO_REGCLASS $A, VRRC)>;
+// sign extension patterns
+// To extend "in place" from v2i32 to v2i64, we have input data like:
+// | undef | i32 | undef | i32 |
+// but xvcvsxwdp expects the input in big-Endian format:
+// | i32 | undef | i32 | undef |
+// so we need to shift everything to the left by one i32 (word) before
+// the conversion.
+def : Pat<(sext_inreg v2i64:$C, v2i32),
+ (XVCVDPSXDS (XVCVSXWDP (XXSLDWI $C, $C, 1)))>;
+def : Pat<(v2f64 (sint_to_fp (sext_inreg v2i64:$C, v2i32))),
+ (XVCVSXWDP (XXSLDWI $C, $C, 1))>;
+
} // AddedComplexity
} // HasVSX
Modified: llvm/trunk/test/CodeGen/PowerPC/vsx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vsx.ll?rev=205146&r1=205145&r2=205146&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vsx.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vsx.ll Sun Mar 30 08:22:59 2014
@@ -580,3 +580,41 @@ define <2 x i1> @test67(<2 x i64> %a, <2
; CHECK: blr
}
+define <2 x double> @test68(<2 x i32> %a) {
+ %w = sitofp <2 x i32> %a to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test68
+; CHECK: xxsldwi [[V1:[0-9]+]], 34, 34, 1
+; CHECK: xvcvsxwdp 34, [[V1]]
+; CHECK: blr
+}
+
+define <2 x double> @test69(<2 x i16> %a) {
+ %w = sitofp <2 x i16> %a to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test69
+; CHECK: vspltisw [[V1:[0-9]+]], 8
+; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
+; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
+; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
+; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: xvcvsxwdp 34, [[V4]]
+; CHECK: blr
+}
+
+define <2 x double> @test70(<2 x i8> %a) {
+ %w = sitofp <2 x i8> %a to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test70
+; CHECK: vspltisw [[V1:[0-9]+]], 12
+; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
+; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
+; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
+; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: xvcvsxwdp 34, [[V4]]
+; CHECK: blr
+}
+