[llvm] r309680 - [DAG] Extend visitSCALAR_TO_VECTOR optimization to truncated vector.
Nirav Dave via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 1 06:45:35 PDT 2017
Author: niravd
Date: Tue Aug 1 06:45:35 2017
New Revision: 309680
URL: http://llvm.org/viewvc/llvm-project?rev=309680&view=rev
Log:
[DAG] Extend visitSCALAR_TO_VECTOR optimization to truncated vector.
Summary:
Allow a SCALAR_TO_VECTOR of an EXTRACT_VECTOR_ELT to be reduced to an
EXTRACT_SUBVECTOR of a VECTOR_SHUFFLE when the output vector is smaller
than the input vector. This marginally improves the code generated for
vector shuffles.
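
For illustration, the pattern this combine targets can be seen in the ins2f1
test from arm64-neon-copy.ll below, reproduced here as a standalone .ll
(assuming it is run through llc with an AArch64 triple, as the existing test
does): the extractelement/insertelement pair is built into a
SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(...)) in the DAG, with a <1 x double>
result that is narrower than the <2 x double> source.

  define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
    ; Lane 1 of the wide vector becomes the single element of the result.
    %tmp3 = extractelement <2 x double> %tmp1, i32 1
    %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
    ret <1 x double> %tmp4
  }

With this change the combine emits a VECTOR_SHUFFLE over the wider source type
followed by an EXTRACT_SUBVECTOR, which on AArch64 selects to a single lane
dup (see the updated CHECK lines in the tests below).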
Reviewers: efriedma, RKSimon, spatel
Subscribers: javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D35566
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll
llvm/trunk/test/CodeGen/AArch64/neon-scalar-copy.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=309680&r1=309679&r2=309680&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Tue Aug 1 06:45:35 2017
@@ -15710,23 +15710,38 @@ SDValue DAGCombiner::visitSCALAR_TO_VECT
   EVT VT = N->getValueType(0);
 
   // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
-  // with a VECTOR_SHUFFLE.
+  // with a VECTOR_SHUFFLE and possible truncate.
   if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
     SDValue InVec = InVal->getOperand(0);
     SDValue EltNo = InVal->getOperand(1);
-
-    // FIXME: We could support implicit truncation if the shuffle can be
-    // scaled to a smaller vector scalar type.
+    auto InVecT = InVec.getValueType();
     ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
-    if (C0 && VT == InVec.getValueType() &&
-        VT.getScalarType() == InVal.getValueType()) {
-      SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
+
+    if (C0) {
+      SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
       int Elt = C0->getZExtValue();
       NewMask[0] = Elt;
-
-      if (TLI.isShuffleMaskLegal(NewMask, VT))
-        return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
-                                    NewMask);
+      SDValue Val;
+      if (VT.getVectorNumElements() <= InVecT.getVectorNumElements() &&
+          TLI.isShuffleMaskLegal(NewMask, VT)) {
+        Val = DAG.getVectorShuffle(InVecT, SDLoc(N), InVec,
+                                   DAG.getUNDEF(InVecT), NewMask);
+        // If the initial vector is the correct size this shuffle is a
+        // valid result.
+        if (VT == InVecT)
+          return Val;
+        // If not we must truncate the vector.
+        if (VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
+          MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
+          SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
+          EVT SubVT =
+              EVT::getVectorVT(*DAG.getContext(), InVecT.getVectorElementType(),
+                               VT.getVectorNumElements());
+          Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT, Val,
+                            ZeroIdx);
+          return Val;
+        }
+      }
     }
   }
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll?rev=309680&r1=309679&r2=309680&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll Tue Aug 1 06:45:35 2017
@@ -188,7 +188,7 @@ define <2 x float> @ins4f2(<4 x float> %
define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
; CHECK-LABEL: ins2f1:
-; CHECK: mov {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
%tmp3 = extractelement <2 x double> %tmp1, i32 1
%tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
ret <1 x double> %tmp4
Modified: llvm/trunk/test/CodeGen/AArch64/neon-scalar-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-scalar-copy.ll?rev=309680&r1=309679&r2=309680&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-scalar-copy.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-scalar-copy.ll Tue Aug 1 06:45:35 2017
@@ -79,8 +79,7 @@ define half @test_dup_hv8H_0(<8 x half>
define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) #0 {
; CHECK-LABEL: test_vector_dup_bv16B:
- ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.b[14]
- ; CHECK-NEXT: fmov s0, [[W]]
+ ; CHECK-NEXT: dup v0.16b, v0.b[14]
; CHECK-NEXT: ret
%shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
ret <1 x i8> %shuffle.i
@@ -96,8 +95,7 @@ define <1 x i8> @test_vector_dup_bv8B(<8
define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) #0 {
; CHECK-LABEL: test_vector_dup_hv8H:
- ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.h[7]
- ; CHECK-NEXT: fmov s0, [[W]]
+ ; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: ret
%shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
ret <1 x i16> %shuffle.i
@@ -113,8 +111,7 @@ define <1 x i16> @test_vector_dup_hv4H(<
define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) #0 {
; CHECK-LABEL: test_vector_dup_sv4S:
- ; CHECK-NEXT: mov [[W:w[0-9]+]], v0.s[3]
- ; CHECK-NEXT: fmov s0, [[W]]
+ ; CHECK-NEXT: dup v0.4s, v0.s[3]
; CHECK-NEXT: ret
%shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
ret <1 x i32> %shuffle
@@ -138,7 +135,7 @@ define <1 x i64> @test_vector_dup_dv2D(<
define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) #0 {
; CHECK-LABEL: test_vector_copy_dup_dv2D:
- ; CHECK-NEXT: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ ; CHECK-NEXT: dup v0.2d, v1.d[1]
; CHECK-NEXT: ret
%vget_lane = extractelement <2 x i64> %c, i32 1
%vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0