[llvm] r268368 - [AVX512] Fix lowerV4X128VectorShuffle to correctly select input operands.

Igor Breger via llvm-commits llvm-commits at lists.llvm.org
Tue May 3 01:08:45 PDT 2016


Author: ibreger
Date: Tue May  3 03:08:44 2016
New Revision: 268368

URL: http://llvm.org/viewvc/llvm-project?rev=268368&view=rev
Log:
[AVX512] Fix lowerV4X128VectorShuffle to correctly select input operands.

Differential Revision: http://reviews.llvm.org/D19803
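
For readers skimming the patch: VSHUFF64x2/VSHUFI64x2 build the low two 128-bit
lanes of the result from their first source operand and the high two lanes from
their second, so the lowering has to hand SHUF128 the operand each half actually
reads instead of always passing V1 and V2. Below is a minimal standalone sketch
of the per-half check the patch adds (the helper name pickHalfSources and the
plain-int mask encoding are illustrative, not LLVM code):

  // Sketch of the per-half source check, outside of LLVM. WidenedMask holds
  // one index per 128-bit lane; indices 0..MaxOp1Index pick lanes of the first
  // input, larger indices pick lanes of the second, -1 marks an undef lane and
  // -2 a zeroable one (which this lowering does not handle).
  #include <array>
  #include <cstddef>
  #include <optional>
  #include <vector>

  std::optional<std::array<int, 2>>
  pickHalfSources(const std::vector<int> &WidenedMask, int MaxOp1Index) {
    std::array<int, 2> Src = {-1, -1};            // -1 == half still unconstrained
    const std::size_t Size = WidenedMask.size();  // 4 lanes for a 512-bit vector
    for (std::size_t i = 0; i < Size; ++i) {
      if (WidenedMask[i] == -1)                   // undef lane: no constraint
        continue;
      if (WidenedMask[i] < -1)                    // zeroable lane: give up
        return std::nullopt;
      int Op = WidenedMask[i] > MaxOp1Index ? 1 : 0;
      std::size_t Half = (i < Size / 2) ? 0 : 1;
      if (Src[Half] == -1)
        Src[Half] = Op;                           // first use fixes this half's source
      else if (Src[Half] != Op)
        return std::nullopt;                      // half mixes both inputs: bail out
    }
    return Src;                                   // a still-unconstrained half stays undef
  }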

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=268368&r1=268367&r2=268368&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue May  3 03:08:44 2016
@@ -11542,6 +11542,23 @@ static SDValue lowerV4X128VectorShuffle(
   if (!canWidenShuffleElements(Mask, WidenedMask))
     return SDValue();
 
+  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
+  // Ensure that the elements in each half of the result come from the same operand.
+  int MaxOp1Index = VT.getVectorNumElements()/2 - 1;
+  for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
+    if (WidenedMask[i] == SM_SentinelZero)
+      return SDValue();
+    if (WidenedMask[i] == SM_SentinelUndef)
+      continue;
+
+    SDValue Op = WidenedMask[i] > MaxOp1Index ? V2 : V1;
+    unsigned OpIndex = (i < Size/2) ? 0 : 1;
+    if (Ops[OpIndex].isUndef())
+      Ops[OpIndex] = Op;
+    else if (Ops[OpIndex] != Op)
+      return SDValue();
+  }
+
   // Form a 128-bit permutation.
   // Convert the 64-bit shuffle mask selection values into 128-bit selection
   // bits defined by a vshuf64x2 instruction's immediate control byte.
@@ -11549,15 +11566,12 @@ static SDValue lowerV4X128VectorShuffle(
   unsigned ControlBitsNum = WidenedMask.size() / 2;
 
   for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
-    if (WidenedMask[i] == SM_SentinelZero)
-      return SDValue();
-
     // Use first element in place of undef mask.
     Imm = (WidenedMask[i] == SM_SentinelUndef) ? 0 : WidenedMask[i];
     PermMask |= (Imm % WidenedMask.size()) << (i * ControlBitsNum);
   }
 
-  return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
+  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
                      DAG.getConstant(PermMask, DL, MVT::i8));
 }
 

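As a quick cross-check of the control bytes expected by the new tests below, the
immediate can be recomputed from the widened 128-bit-lane mask (each pair of
64-bit mask elements collapses to one lane index), mirroring the PermMask loop
above. The helper shuf128Imm and the main() driver are illustrative only, not
LLVM code; undef lanes are written as -1:

  #include <cassert>
  #include <vector>

  // Two selection bits per 128-bit lane; undef lanes default to lane 0,
  // matching "Use first element in place of undef mask" above.
  unsigned shuf128Imm(const std::vector<int> &WidenedMask) {
    unsigned Size = WidenedMask.size();        // 4 x 128-bit lanes
    unsigned ControlBitsNum = Size / 2;        // 2 bits per lane
    unsigned PermMask = 0;
    for (unsigned i = 0; i < Size; ++i) {
      unsigned Imm = WidenedMask[i] < 0 ? 0 : WidenedMask[i];
      PermMask |= (Imm % Size) << (i * ControlBitsNum);
    }
    return PermMask;
  }

  int main() {
    assert(shuf128Imm({1, 0, 2, 3}) == 225);   // shuffle_v8f64_23014567
    assert(shuf128Imm({1, 0, -1, 3}) == 193);  // shuffle_v8f64_2301uu67
    assert(shuf128Imm({1, 0, -1, -1}) == 1);   // shuffle_v8f64_2301uuuu
    assert(shuf128Imm({-1, -1, 1, 0}) == 16);  // shuffle_v8f64_uuu2301
    return 0;
  }

The register operands in the CHECK lines then follow from the per-half source
selection: in shuffle_v8f64_23014567, for example, both halves of the result
read from %zmm1, so the fixed lowering passes the same register as both sources
instead of mixing in %zmm0.
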
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll?rev=268368&r1=268367&r2=268368&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll Tue May  3 03:08:44 2016
@@ -2271,3 +2271,35 @@ define <16 x float> @test_vshuff32x4_512
   %res = shufflevector <16 x float> %x, <16 x float> %x1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
   ret <16 x float> %res
 }
+
+define <8 x double> @shuffle_v8f64_23014567(<8 x double> %a0, <8 x double> %a1) {
+; ALL-LABEL: shuffle_v8f64_23014567:
+; ALL:       # BB#0:
+; ALL-NEXT:    vshuff64x2 $225, %zmm1, %zmm1, %zmm0 # zmm0 = zmm1[2,3,0,1,4,5,6,7]
+  %1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %1
+}
+
+define <8 x double> @shuffle_v8f64_2301uu67(<8 x double> %a0, <8 x double> %a1) {
+; ALL-LABEL: shuffle_v8f64_2301uu67:
+; ALL:       # BB#0:
+; ALL-NEXT:    vshuff64x2 $193, %zmm1, %zmm1, %zmm0 # zmm0 = zmm1[2,3,0,1,0,1,6,7]
+  %1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 undef, i32 undef, i32 6, i32 7>
+  ret <8 x double> %1
+}
+
+define <8 x double> @shuffle_v8f64_2301uuuu(<8 x double> %a0, <8 x double> %a1) {
+; ALL-LABEL: shuffle_v8f64_2301uuuu:
+; ALL:       # BB#0:
+; ALL-NEXT:    vshuff64x2 $1, %zmm0, %zmm1, %zmm0 # zmm0 = zmm1[2,3,0,1],zmm0[0,1,0,1]
+  %1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
+  ret <8 x double> %1
+}
+
+define <8 x double> @shuffle_v8f64_uuu2301(<8 x double> %a0, <8 x double> %a1) {
+; ALL-LABEL: shuffle_v8f64_uuu2301:
+; ALL:       # BB#0:
+; ALL-NEXT:    vshuff64x2 $16, %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[0,1,0,1],zmm1[2,3,0,1]
+  %1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 0, i32 1>
+  ret <8 x double> %1
+}

More information about the llvm-commits mailing list