[llvm] r218214 - [x86] Teach the new vector shuffle lowering the basics about insertion

Chandler Carruth chandlerc at gmail.com
Sun Sep 21 05:49:46 PDT 2014


Author: chandlerc
Date: Sun Sep 21 07:49:46 2014
New Revision: 218214

URL: http://llvm.org/viewvc/llvm-project?rev=218214&view=rev
Log:
[x86] Teach the new vector shuffle lowering the basics about insertion
of a single element into a zero vector for v4f64 and v4i64 in AVX.
Ironically, there is less to see here because xor+blend is so crazy fast
that we can't really beat it for zeroing the high 128-bit lane.
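
The new code paths below fire only when exactly one mask element selects
from the second input vector and that element lands in lane 0. As a
minimal standalone sketch of that guard (helper name hypothetical, not
part of the patch; assumes a 4-lane mask using indices 0-7):

    // Returns true when a 4-lane shuffle mask takes exactly one element
    // from the second input (indices 4..7) and places it in lane 0, i.e.
    // the insert-into-zero-vector shape this patch recognizes.
    #include <algorithm>
    #include <array>

    static bool isSingleV2ElementInLaneZero(const std::array<int, 4> &Mask) {
      int NumV2Elements =
          std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
      return NumV2Elements == 1 && Mask[0] >= 4;
    }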

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=218214&r1=218213&r2=218214&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Sep 21 07:49:46 2014
@@ -9243,6 +9243,15 @@ static SDValue lowerV4F64VectorShuffle(S
   if (isShuffleEquivalent(Mask, 5, 1, 7, 3))
     return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
 
+  // If we have a single input to the zero element, insert that into V1 if we
+  // can do so cheaply.
+  int NumV2Elements =
+      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+  if (NumV2Elements == 1 && Mask[0] >= 4)
+    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+            MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
+      return Insertion;
+
   if (SDValue Blend =
           lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask, DAG))
     return Blend;
@@ -9306,6 +9315,15 @@ static SDValue lowerV4I64VectorShuffle(S
   if (is128BitLaneCrossingShuffleMask(MVT::v4i64, Mask))
     return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
 
+  // If we have a single input to the zero element, insert that into V1 if we
+  // can do so cheaply.
+  int NumV2Elements =
+      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+  if (NumV2Elements == 1 && Mask[0] >= 4)
+    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
+            MVT::v4i64, DL, V1, V2, Mask, Subtarget, DAG))
+      return Insertion;
+
   // AVX1 doesn't provide any facilities for v4i64 shuffles, bitcast and
   // delegate to floating point code.
   V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f64, V1);

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=218214&r1=218213&r2=218214&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Sun Sep 21 07:49:46 2014
@@ -563,3 +563,52 @@ define <4 x i64> @stress_test1(<4 x i64>
 
   ret <4 x i64> %f
 }
+
+define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
+; ALL-LABEL: @insert_reg_and_zero_v4i64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq %rdi, %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %v = insertelement <4 x i64> undef, i64 %a, i64 0
+  %shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
+; ALL-LABEL: @insert_mem_and_zero_v4i64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq (%rdi), %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %a = load i64* %ptr
+  %v = insertelement <4 x i64> undef, i64 %a, i64 0
+  %shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
+; ALL-LABEL: @insert_reg_and_zero_v4f64
+; ALL:       # BB#0:
+; ALL:         vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %v = insertelement <4 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x double> %shuffle
+}
+
+define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
+; ALL-LABEL: @insert_mem_and_zero_v4f64
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovsd (%rdi), %xmm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
+; ALL-NEXT:    retq
+  %a = load double* %ptr
+  %v = insertelement <4 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+  ret <4 x double> %shuffle
+}
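
For reference, the insertelement + shufflevector-with-zero IR exercised
in these tests matches the shape a front end can emit for a plain
"one value in lane 0, zeros elsewhere" source pattern. A rough C++
analogue using AVX intrinsics (function name hypothetical; whether the
xor+blend idiom is actually produced depends on the front end and
optimization level):

    // Hypothetical analogue of the insert_reg_and_zero_v4f64 test above:
    // build a <4 x double> with 'a' in element 0 and zeros elsewhere.
    // _mm256_set_pd takes elements highest-first, so 'a' goes last.
    #include <immintrin.h>

    static __m256d insert_and_zero_v4f64(double a) {
      return _mm256_set_pd(0.0, 0.0, 0.0, a);
    }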
