[llvm] r218210 - [x86] Regenerate this test case now that I've improved my script for

Chandler Carruth chandlerc at gmail.com
Sun Sep 21 04:51:33 PDT 2014


Author: chandlerc
Date: Sun Sep 21 06:51:33 2014
New Revision: 218210

URL: http://llvm.org/viewvc/llvm-project?rev=218210&view=rev
Log:
[x86] Regenerate this test case now that I've improved my test-generation
script to format things more consistently and to actually catch all the
operand sequences that should be elided in favor of the asm comments.
No functional changes here.
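
For context, the regenerated CHECK lines make two purely cosmetic changes: they
collapse explicit operand lists into {{.*}} whenever the instruction already
carries an asm comment describing the shuffle result, and they drop the doubled
space after mnemonics such as vinsertf128 and vextractf128. The generator
script itself is not part of this commit; the Python snippet below is only a
rough sketch, under the assumption of a simple regex pass over llc output, of
the kind of rewrite it performs (the regex and function names are illustrative,
not taken from the real script):

    import re

    # Hypothetical sketch of the check-line normalization this commit reflects.
    # The real generator script is not included in this commit; this only
    # mirrors the two rewrites visible in the diff below.
    ELIDE_RE = re.compile(
        r'^(;\s*\w+-NEXT:\s+)(\S+)\s+.*?(#\s*[xy]mm\d+\s*=.*)$'
    )

    def normalize_check_line(line: str) -> str:
        """Collapse explicit operands into {{.*}} when an asm comment already
        documents the result; otherwise just squash the doubled space after
        the mnemonic on -NEXT lines."""
        m = ELIDE_RE.match(line)
        if m:
            prefix, mnemonic, comment = m.groups()
            return f"{prefix}{mnemonic} {{{{.*}}}} {comment}"
        return re.sub(r'^(;\s*\w+-NEXT:\s+\S+)\s{2,}', r'\1 ', line)

    if __name__ == "__main__":
        old = "; ALL-NEXT:    vblendps  $85, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[0],ymm0[1]"
        print(normalize_check_line(old))
        # -> ; ALL-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1]

Applied to the diff below, the first rewrite covers the vblendps lines and the
second covers the vinsertf128/vextractf128 lines.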

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll?rev=218210&r1=218209&r2=218210&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll Sun Sep 21 06:51:33 2014
@@ -7,7 +7,7 @@ define <8 x float> @shuffle_v8f32_000000
 ; ALL-LABEL: @shuffle_v8f32_00000000
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm0, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -18,7 +18,7 @@ define <8 x float> @shuffle_v8f32_000000
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,0,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,1,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0>
   ret <8 x float> %shuffle
@@ -29,7 +29,7 @@ define <8 x float> @shuffle_v8f32_000002
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,0,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,2,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -40,7 +40,7 @@ define <8 x float> @shuffle_v8f32_000030
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,0,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[3,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -49,11 +49,11 @@ define <8 x float> @shuffle_v8f32_000030
 define <8 x float> @shuffle_v8f32_00040000(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_00040000
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm0[0,0],xmm1[2,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -62,11 +62,11 @@ define <8 x float> @shuffle_v8f32_000400
 define <8 x float> @shuffle_v8f32_00500000(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_00500000
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm1[1,0],xmm0[0,0]
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm0[0,0],xmm1[0,2]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -75,11 +75,11 @@ define <8 x float> @shuffle_v8f32_005000
 define <8 x float> @shuffle_v8f32_06000000(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_06000000
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm1[2,0],xmm0[0,0]
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm1[2,0],xmm0[0,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -88,11 +88,11 @@ define <8 x float> @shuffle_v8f32_060000
 define <8 x float> @shuffle_v8f32_70000000(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_70000000
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm1[3,0],xmm0[0,0]
 ; ALL-NEXT:    vshufps {{.*}} # xmm1 = xmm1[0,2],xmm0[0,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
   ret <8 x float> %shuffle
@@ -102,9 +102,9 @@ define <8 x float> @shuffle_v8f32_010145
 ; ALL-LABEL: @shuffle_v8f32_01014545
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vunpcklpd {{.*}} # xmm1 = xmm0[0,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
   ret <8 x float> %shuffle
@@ -115,7 +115,7 @@ define <8 x float> @shuffle_v8f32_001122
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vunpcklps {{.*}} # xmm1 = xmm0[0,0,1,1]
 ; ALL-NEXT:    vunpckhps {{.*}} # xmm0 = xmm0[2,2,3,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
   ret <8 x float> %shuffle
@@ -126,7 +126,7 @@ define <8 x float> @shuffle_v8f32_000011
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,0,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[1,1,1,1]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1>
   ret <8 x float> %shuffle
@@ -135,7 +135,7 @@ define <8 x float> @shuffle_v8f32_000011
 define <8 x float> @shuffle_v8f32_81a3c5e7(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_81a3c5e7
 ; ALL:       # BB#0:
-; ALL-NEXT:    vblendps  $85, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; ALL-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
   ret <8 x float> %shuffle
@@ -146,8 +146,8 @@ define <8 x float> @shuffle_v8f32_080808
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm1[0,0,2,0]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,1,0,3]
-; ALL-NEXT:    vblendps  $10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm0, %ymm0
+; ALL-NEXT:    vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
   ret <8 x float> %shuffle
@@ -157,14 +157,14 @@ define <8 x float> @shuffle_v8f32_08084c
 ; ALL-LABEL: @shuffle_v8f32_08084c4c
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm1[0,0,2,0]
-; ALL-NEXT:    vextractf128  $1, %ymm1, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm1[0,0,2,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm1, %ymm2, %ymm1
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm0[0,1,0,3]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,1,0,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm2, %ymm0
-; ALL-NEXT:    vblendps  $-86, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; ALL-NEXT:    vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 4, i32 12, i32 4, i32 12>
   ret <8 x float> %shuffle
@@ -174,9 +174,9 @@ define <8 x float> @shuffle_v8f32_8823cc
 ; ALL-LABEL: @shuffle_v8f32_8823cc67
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm1[0,0,2,3]
-; ALL-NEXT:    vextractf128  $1, %ymm1, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm1[0,0,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm1, %ymm2, %ymm1
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; ALL-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 8, i32 2, i32 3, i32 12, i32 12, i32 6, i32 7>
@@ -187,13 +187,13 @@ define <8 x float> @shuffle_v8f32_9832dc
 ; ALL-LABEL: @shuffle_v8f32_9832dc76
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm1[1,0,2,3]
-; ALL-NEXT:    vextractf128  $1, %ymm1, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm1[1,0,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm1, %ymm2, %ymm1
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm0[0,1,3,2]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,1,3,2]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm2, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; ALL-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
@@ -204,13 +204,13 @@ define <8 x float> @shuffle_v8f32_9810dc
 ; ALL-LABEL: @shuffle_v8f32_9810dc54
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm1[1,0,2,3]
-; ALL-NEXT:    vextractf128  $1, %ymm1, %xmm1
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm1[1,0,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm1, %ymm2, %ymm1
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm0[0,1,1,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,1,1,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm2, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; ALL-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
@@ -222,11 +222,11 @@ define <8 x float> @shuffle_v8f32_08192a
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm1[0,2,2,3]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm3 = xmm0[2,1,3,3]
-; ALL-NEXT:    vblendps  $10, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; ALL-NEXT:    vblendps {{.*}} # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm1[0,0,2,1]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,1,1,3]
-; ALL-NEXT:    vblendps  $10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; ALL-NEXT:    vinsertf128  $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; ALL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   ret <8 x float> %shuffle
@@ -237,10 +237,10 @@ define <8 x float> @shuffle_v8f32_08991a
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm2 = xmm0[1,1,2,3]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm3 = xmm1[0,2,3,3]
-; ALL-NEXT:    vblendps  $1, %xmm2, %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[1,2,3]
+; ALL-NEXT:    vblendps {{.*}} # xmm2 = xmm2[0],xmm3[1,2,3]
 ; ALL-NEXT:    vunpcklps {{.*}} # xmm1 = xmm1[0,0,1,1]
-; ALL-NEXT:    vblendps  $1, %xmm0, %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[1,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1,2,3]
+; ALL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 9, i32 1, i32 10, i32 11, i32 11>
   ret <8 x float> %shuffle
@@ -249,12 +249,12 @@ define <8 x float> @shuffle_v8f32_08991a
 define <8 x float> @shuffle_v8f32_091b2d3f(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_091b2d3f
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128  $1, %ymm1, %xmm2
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; ALL-NEXT:    vpermilps {{.*}} # xmm3 = xmm0[2,1,3,3]
-; ALL-NEXT:    vblendps  $10, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; ALL-NEXT:    vblendps {{.*}} # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,1,1,3]
-; ALL-NEXT:    vblendps  $10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; ALL-NEXT:    vinsertf128  $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; ALL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
   ret <8 x float> %shuffle
@@ -263,11 +263,11 @@ define <8 x float> @shuffle_v8f32_091b2d
 define <8 x float> @shuffle_v8f32_09ab1def(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_09ab1def
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextractf128  $1, %ymm1, %xmm2
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; ALL-NEXT:    vpermilps {{.*}} # xmm3 = xmm0[1,1,2,3]
-; ALL-NEXT:    vblendps  $1, %xmm3, %xmm2, %xmm2 # xmm2 = xmm3[0],xmm2[1,2,3]
-; ALL-NEXT:    vblendps  $1, %xmm0, %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[1,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vblendps {{.*}} # xmm2 = xmm3[0],xmm2[1,2,3]
+; ALL-NEXT:    vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1,2,3]
+; ALL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
   ret <8 x float> %shuffle
@@ -277,9 +277,9 @@ define <8 x float> @shuffle_v8f32_000144
 ; ALL-LABEL: @shuffle_v8f32_00014445
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,0,1]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,0,1]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
   ret <8 x float> %shuffle
@@ -289,9 +289,9 @@ define <8 x float> @shuffle_v8f32_002044
 ; ALL-LABEL: @shuffle_v8f32_00204464
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,2,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,2,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
   ret <8 x float> %shuffle
@@ -301,9 +301,9 @@ define <8 x float> @shuffle_v8f32_030047
 ; ALL-LABEL: @shuffle_v8f32_03004744
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,3,0,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,3,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
   ret <8 x float> %shuffle
@@ -313,9 +313,9 @@ define <8 x float> @shuffle_v8f32_100054
 ; ALL-LABEL: @shuffle_v8f32_10005444
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[1,0,0,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[1,0,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
   ret <8 x float> %shuffle
@@ -325,9 +325,9 @@ define <8 x float> @shuffle_v8f32_220066
 ; ALL-LABEL: @shuffle_v8f32_22006644
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[2,2,0,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[2,2,0,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
   ret <8 x float> %shuffle
@@ -337,9 +337,9 @@ define <8 x float> @shuffle_v8f32_333077
 ; ALL-LABEL: @shuffle_v8f32_33307774
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[3,3,3,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[3,3,3,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
   ret <8 x float> %shuffle
@@ -349,9 +349,9 @@ define <8 x float> @shuffle_v8f32_321076
 ; ALL-LABEL: @shuffle_v8f32_32107654
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[3,2,1,0]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[3,2,1,0]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
   ret <8 x float> %shuffle
@@ -361,9 +361,9 @@ define <8 x float> @shuffle_v8f32_002344
 ; ALL-LABEL: @shuffle_v8f32_00234467
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[0,0,2,3]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,0,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
   ret <8 x float> %shuffle
@@ -373,9 +373,9 @@ define <8 x float> @shuffle_v8f32_002244
 ; ALL-LABEL: @shuffle_v8f32_00224466
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vmovsldup {{.*}} # xmm1 = xmm0[0,0,2,2]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vmovsldup {{.*}} # xmm0 = xmm0[0,0,2,2]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
   ret <8 x float> %shuffle
@@ -385,9 +385,9 @@ define <8 x float> @shuffle_v8f32_103254
 ; ALL-LABEL: @shuffle_v8f32_10325476
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[1,0,3,2]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[1,0,3,2]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   ret <8 x float> %shuffle
@@ -397,9 +397,9 @@ define <8 x float> @shuffle_v8f32_113355
 ; ALL-LABEL: @shuffle_v8f32_11335577
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vmovshdup {{.*}} # xmm1 = xmm0[1,1,3,3]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vmovshdup {{.*}} # xmm0 = xmm0[1,1,3,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
   ret <8 x float> %shuffle
@@ -409,9 +409,9 @@ define <8 x float> @shuffle_v8f32_102354
 ; ALL-LABEL: @shuffle_v8f32_10235467
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[1,0,2,3]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[1,0,2,3]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
   ret <8 x float> %shuffle
@@ -421,9 +421,9 @@ define <8 x float> @shuffle_v8f32_102254
 ; ALL-LABEL: @shuffle_v8f32_10225466
 ; ALL:       # BB#0:
 ; ALL-NEXT:    vpermilps {{.*}} # xmm1 = xmm0[1,0,2,2]
-; ALL-NEXT:    vextractf128  $1, %ymm0, %xmm0
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[1,0,2,2]
-; ALL-NEXT:    vinsertf128  $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
   ret <8 x float> %shuffle




