[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits <llvm-commits at lists.llvm.org>
Mon Dec 4 09:18:56 PST 2017
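
Summary, inferred from the diff below: this commit changes how machine basic
block references are printed in assembly comments and debug output, from the
old "BB#N" spelling to "%bb.N", matching the MIR syntax. The hunks in this
test only update FileCheck expectations accordingly; no codegen behavior
changes.

A minimal sketch of the new expectation format (hypothetical test, not part
of this commit; the RUN line and function name are illustrative only):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

    define <4 x i32> @example(<4 x i32> %a) {
    ; CHECK-LABEL: example:
    ; CHECK:       # %bb.0:
    ; CHECK-NEXT:    retq
      ret <4 x i32> %a
    }

Before this change, the checked comment would have read "# BB#0:" instead of
"# %bb.0:".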


Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll Mon Dec  4 09:18:51 2017
@@ -18,7 +18,7 @@ declare <8 x i16> @llvm.x86.sse2.pshufh.
 
 define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
 ; ALL-LABEL: combine_pshufd1:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
@@ -28,7 +28,7 @@ entry:
 
 define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
 ; ALL-LABEL: combine_pshufd2:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
@@ -41,7 +41,7 @@ entry:
 
 define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
 ; ALL-LABEL: combine_pshufd3:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
@@ -54,12 +54,12 @@ entry:
 
 define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
 ; SSE-LABEL: combine_pshufd4:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_pshufd4:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
 ; AVX-NEXT:    retq
 entry:
@@ -73,12 +73,12 @@ entry:
 
 define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
 ; SSE-LABEL: combine_pshufd5:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_pshufd5:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
 ; AVX-NEXT:    retq
 entry:
@@ -92,17 +92,17 @@ entry:
 
 define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
 ; SSE-LABEL: combine_pshufd6:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_pshufd6:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_pshufd6:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastss %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 entry:
@@ -113,7 +113,7 @@ entry:
 
 define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
 ; ALL-LABEL: combine_pshuflw1:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    retq
 entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
@@ -123,7 +123,7 @@ entry:
 
 define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
 ; ALL-LABEL: combine_pshuflw2:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    retq
 entry:
   %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
@@ -134,12 +134,12 @@ entry:
 
 define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
 ; SSE-LABEL: combine_pshuflw3:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_pshuflw3:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
 ; AVX-NEXT:    retq
 entry:
@@ -151,12 +151,12 @@ entry:
 
 define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
 ; SSE-LABEL: combine_pshufhw1:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_pshufhw1:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
 ; AVX-NEXT:    retq
 entry:
@@ -168,13 +168,13 @@ entry:
 
 define <4 x i32> @combine_bitwise_ops_test1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    retq
@@ -186,13 +186,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    retq
@@ -204,13 +204,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    retq
@@ -222,13 +222,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test4(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    retq
@@ -240,13 +240,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test5(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    retq
@@ -258,13 +258,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test6(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    retq
@@ -280,7 +280,7 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test1b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pand %xmm1, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -288,7 +288,7 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test1b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pand %xmm1, %xmm0
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -296,19 +296,19 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test1b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pand %xmm1, %xmm0
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test1b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_bitwise_ops_test1b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX2-NEXT:    retq
@@ -320,7 +320,7 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test2b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -328,7 +328,7 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test2b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    por %xmm1, %xmm0
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
@@ -336,19 +336,19 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test2b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test2b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_bitwise_ops_test2b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX2-NEXT:    retq
@@ -360,33 +360,33 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test3b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm0
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test3b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm0
 ; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test3b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test3b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_bitwise_ops_test3b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
@@ -399,7 +399,7 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test4b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pand %xmm1, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -407,7 +407,7 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test4b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pand %xmm1, %xmm0
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -415,19 +415,19 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test4b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pand %xmm1, %xmm0
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test4b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_bitwise_ops_test4b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
 ; AVX2-NEXT:    retq
@@ -439,7 +439,7 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test5b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -447,7 +447,7 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test5b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    por %xmm1, %xmm0
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
@@ -455,19 +455,19 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test5b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test5b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_bitwise_ops_test5b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
 ; AVX2-NEXT:    retq
@@ -479,33 +479,33 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test6b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test6b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm0
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test6b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm0
 ; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test6b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm0
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test6b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_bitwise_ops_test6b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
@@ -518,13 +518,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test1c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps %xmm1, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test1c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
 ; AVX-NEXT:    retq
@@ -536,13 +536,13 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test2c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test2c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
 ; AVX-NEXT:    retq
@@ -554,27 +554,27 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test3c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm0
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test3c:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm0
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test3c:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test3c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
 ; AVX-NEXT:    retq
@@ -586,14 +586,14 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test4c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps %xmm1, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test4c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
 ; AVX-NEXT:    retq
@@ -605,14 +605,14 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_bitwise_ops_test5c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test5c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
 ; AVX-NEXT:    retq
@@ -624,7 +624,7 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE2-LABEL: combine_bitwise_ops_test6c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm0
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
@@ -632,7 +632,7 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_bitwise_ops_test6c:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm0
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
@@ -640,13 +640,13 @@ define <4 x i32> @combine_bitwise_ops_te
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_bitwise_ops_test6c:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_bitwise_ops_test6c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
 ; AVX-NEXT:    retq
@@ -658,12 +658,12 @@ define <4 x i32> @combine_bitwise_ops_te
 
 define <4 x i32> @combine_nested_undef_test1(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
@@ -673,12 +673,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test2(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
@@ -688,12 +688,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test3(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
@@ -703,17 +703,17 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test4(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test4:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test4:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
@@ -723,12 +723,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test5(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 5, i32 5, i32 2, i32 3>
@@ -738,12 +738,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test6(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
@@ -753,12 +753,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test7(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test7:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -768,12 +768,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test8(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -783,12 +783,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test9(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test9:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test9:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,2]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 2, i32 5>
@@ -798,12 +798,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test10(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test10:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test10:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 5>
@@ -813,12 +813,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test11(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test11:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test11:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 2, i32 5, i32 4>
@@ -828,17 +828,17 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test12(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test12:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test12:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test12:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 0, i32 2, i32 4>
@@ -849,7 +849,7 @@ define <4 x i32> @combine_nested_undef_t
 ; The following pair of shuffles is folded into vector %A.
 define <4 x i32> @combine_nested_undef_test13(<4 x i32> %A, <4 x i32> %B) {
 ; ALL-LABEL: combine_nested_undef_test13:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
@@ -859,12 +859,12 @@ define <4 x i32> @combine_nested_undef_t
 ; The following pair of shuffles is folded into vector %B.
 define <4 x i32> @combine_nested_undef_test14(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test14:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test14:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
@@ -881,35 +881,35 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test15(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: combine_nested_undef_test15:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_nested_undef_test15:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_nested_undef_test15:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test15:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test15:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vbroadcastss %xmm1, %xmm1
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
@@ -921,33 +921,33 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test16(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: combine_nested_undef_test16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_nested_undef_test16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_nested_undef_test16:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX2-NEXT:    retq
@@ -958,31 +958,31 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test17(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: combine_nested_undef_test17:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_nested_undef_test17:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_nested_undef_test17:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test17:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test17:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
 ; AVX2-NEXT:    retq
@@ -993,12 +993,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test18(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test18:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,0,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test18:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[1,1,0,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
@@ -1008,31 +1008,31 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test19(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: combine_nested_undef_test19:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_nested_undef_test19:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_nested_undef_test19:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test19:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test19:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,0,0,0]
 ; AVX2-NEXT:    retq
@@ -1043,33 +1043,33 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test20(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: combine_nested_undef_test20:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_nested_undef_test20:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_nested_undef_test20:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test20:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test20:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,3,0]
 ; AVX2-NEXT:    retq
@@ -1080,31 +1080,31 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test21(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: combine_nested_undef_test21:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_nested_undef_test21:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_nested_undef_test21:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test21:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test21:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -1119,12 +1119,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test22(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test22:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test22:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[1,1,1,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
@@ -1134,12 +1134,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test23(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test23:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,1,0,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test23:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[0,1,0,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
@@ -1149,12 +1149,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test24(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test24:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,3,2,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test24:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[0,3,2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
@@ -1164,17 +1164,17 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test25(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test25:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test25:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test25:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 5, i32 2, i32 4>
@@ -1184,12 +1184,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test26(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test26:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test26:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 2, i32 6, i32 7>
@@ -1199,17 +1199,17 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test27(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test27:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_nested_undef_test27:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_nested_undef_test27:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 2, i32 1, i32 5, i32 4>
@@ -1219,12 +1219,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x i32> @combine_nested_undef_test28(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: combine_nested_undef_test28:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_nested_undef_test28:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
@@ -1234,12 +1234,12 @@ define <4 x i32> @combine_nested_undef_t
 
 define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1249,24 +1249,24 @@ define <4 x float> @combine_test1(<4 x f
 
 define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test2:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test2:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -1276,12 +1276,12 @@ define <4 x float> @combine_test2(<4 x f
 
 define <4 x float> @combine_test3(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -1291,12 +1291,12 @@ define <4 x float> @combine_test3(<4 x f
 
 define <4 x float> @combine_test4(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -1306,24 +1306,24 @@ define <4 x float> @combine_test4(<4 x f
 
 define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test5:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test5:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test5:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1333,12 +1333,12 @@ define <4 x float> @combine_test5(<4 x f
 
 define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1348,29 +1348,29 @@ define <4 x i32> @combine_test6(<4 x i32
 
 define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: combine_test7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_test7:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_test7:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -1380,12 +1380,12 @@ define <4 x i32> @combine_test7(<4 x i32
 
 define <4 x i32> @combine_test8(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -1395,13 +1395,13 @@ define <4 x i32> @combine_test8(<4 x i32
 
 define <4 x i32> @combine_test9(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test9:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test9:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -1411,29 +1411,29 @@ define <4 x i32> @combine_test9(<4 x i32
 
 define <4 x i32> @combine_test10(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: combine_test10:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test10:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test10:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_test10:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_test10:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1443,7 +1443,7 @@ define <4 x i32> @combine_test10(<4 x i3
 
 define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
 ; ALL-LABEL: combine_test11:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1452,24 +1452,24 @@ define <4 x float> @combine_test11(<4 x
 
 define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test12:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test12:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test12:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test12:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -1479,12 +1479,12 @@ define <4 x float> @combine_test12(<4 x
 
 define <4 x float> @combine_test13(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test13:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test13:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1494,12 +1494,12 @@ define <4 x float> @combine_test13(<4 x
 
 define <4 x float> @combine_test14(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test14:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test14:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
@@ -1509,24 +1509,24 @@ define <4 x float> @combine_test14(<4 x
 
 define <4 x float> @combine_test15(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test15:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test15:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test15:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test15:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
@@ -1536,7 +1536,7 @@ define <4 x float> @combine_test15(<4 x
 
 define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
 ; ALL-LABEL: combine_test16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
   %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1545,29 +1545,29 @@ define <4 x i32> @combine_test16(<4 x i3
 
 define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: combine_test17:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test17:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test17:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_test17:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_test17:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -1577,12 +1577,12 @@ define <4 x i32> @combine_test17(<4 x i3
 
 define <4 x i32> @combine_test18(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test18:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test18:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1592,12 +1592,12 @@ define <4 x i32> @combine_test18(<4 x i3
 
 define <4 x i32> @combine_test19(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test19:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test19:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
@@ -1607,29 +1607,29 @@ define <4 x i32> @combine_test19(<4 x i3
 
 define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: combine_test20:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test20:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test20:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_test20:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_test20:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; AVX2-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
@@ -1639,7 +1639,7 @@ define <4 x i32> @combine_test20(<4 x i3
 
 define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
 ; SSE-LABEL: combine_test21:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -1647,7 +1647,7 @@ define <4 x i32> @combine_test21(<8 x i3
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test21:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -1662,13 +1662,13 @@ define <4 x i32> @combine_test21(<8 x i3
 
 define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
 ; SSE-LABEL: combine_test22:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test22:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX-NEXT:    retq
@@ -1682,12 +1682,12 @@ define <8 x float> @combine_test22(<2 x
 ; PR22359
 define void @combine_test23(<8 x float> %v, <2 x float>* %ptr) {
 ; SSE-LABEL: combine_test23:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test23:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups %xmm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
@@ -1704,13 +1704,13 @@ define void @combine_test23(<8 x float>
 
 define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test1b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test1b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[0,1,2,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1720,23 +1720,23 @@ define <4 x float> @combine_test1b(<4 x
 
 define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test2b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test2b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm1[0,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test2b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movddup {{.*#+}} xmm0 = xmm1[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test2b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm1[0,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1746,25 +1746,25 @@ define <4 x float> @combine_test2b(<4 x
 
 define <4 x float> @combine_test3b(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test3b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test3b:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test3b:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test3b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,3]
 ; AVX-NEXT:    retq
@@ -1775,13 +1775,13 @@ define <4 x float> @combine_test3b(<4 x
 
 define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_test4b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test4b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[1,1,2,3]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
@@ -1794,7 +1794,7 @@ define <4 x float> @combine_test4b(<4 x
 
 define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
 ; SSE2-LABEL: combine_test1c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1803,7 +1803,7 @@ define <4 x i8> @combine_test1c(<4 x i8>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test1c:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -1812,21 +1812,21 @@ define <4 x i8> @combine_test1c(<4 x i8>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test1c:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_test1c:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_test1c:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -1840,7 +1840,7 @@ define <4 x i8> @combine_test1c(<4 x i8>
 
 define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
 ; SSE2-LABEL: combine_test2c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1851,7 +1851,7 @@ define <4 x i8> @combine_test2c(<4 x i8>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test2c:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1862,14 +1862,14 @@ define <4 x i8> @combine_test2c(<4 x i8>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test2c:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test2c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -1883,7 +1883,7 @@ define <4 x i8> @combine_test2c(<4 x i8>
 
 define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
 ; SSE2-LABEL: combine_test3c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -1894,7 +1894,7 @@ define <4 x i8> @combine_test3c(<4 x i8>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test3c:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -1905,14 +1905,14 @@ define <4 x i8> @combine_test3c(<4 x i8>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test3c:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test3c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1]
@@ -1926,7 +1926,7 @@ define <4 x i8> @combine_test3c(<4 x i8>
 
 define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
 ; SSE2-LABEL: combine_test4c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1938,7 +1938,7 @@ define <4 x i8> @combine_test4c(<4 x i8>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_test4c:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1950,21 +1950,21 @@ define <4 x i8> @combine_test4c(<4 x i8>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_test4c:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_test4c:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_test4c:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
@@ -2008,22 +2008,22 @@ define <4 x i8> @combine_test4c(<4 x i8>
 
 define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_blend_01:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_blend_01:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_blend_01:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_blend_01:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 undef, i32 2, i32 3>
@@ -2033,26 +2033,26 @@ define <4 x float> @combine_blend_01(<4
 
 define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_blend_02:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_blend_02:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_blend_02:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_blend_02:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
 ; AVX-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 undef, i32 3>
@@ -2062,24 +2062,24 @@ define <4 x float> @combine_blend_02(<4
 
 define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_blend_123:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_blend_123:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_blend_123:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_blend_123:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
@@ -2090,13 +2090,13 @@ define <4 x float> @combine_blend_123(<4
 
 define <4 x i32> @combine_test_movhl_1(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test_movhl_1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test_movhl_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 7, i32 5, i32 3>
@@ -2106,13 +2106,13 @@ define <4 x i32> @combine_test_movhl_1(<
 
 define <4 x i32> @combine_test_movhl_2(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test_movhl_2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test_movhl_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 0, i32 3, i32 6>
@@ -2122,13 +2122,13 @@ define <4 x i32> @combine_test_movhl_2(<
 
 define <4 x i32> @combine_test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_test_movhl_3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test_movhl_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 7, i32 6, i32 3, i32 2>
@@ -2142,22 +2142,22 @@ define <4 x i32> @combine_test_movhl_3(<
 
 define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_undef_input_test1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
@@ -2167,12 +2167,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test2(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_undef_input_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2182,12 +2182,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test3(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_undef_input_test3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2197,12 +2197,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test4(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_undef_input_test4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2212,24 +2212,24 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_undef_input_test5:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test5:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test5:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
@@ -2243,7 +2243,7 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test6(<4 x float> %a) {
 ; ALL-LABEL: combine_undef_input_test6:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
@@ -2252,22 +2252,22 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test7(<4 x float> %a) {
 ; SSE2-LABEL: combine_undef_input_test7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2277,22 +2277,22 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test8(<4 x float> %a) {
 ; SSE2-LABEL: combine_undef_input_test8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test8:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2302,12 +2302,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test9(<4 x float> %a) {
 ; SSE-LABEL: combine_undef_input_test9:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test9:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2317,7 +2317,7 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
 ; ALL-LABEL: combine_undef_input_test10:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
@@ -2326,22 +2326,22 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_undef_input_test11:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test11:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test11:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test11:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
@@ -2351,12 +2351,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test12(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_undef_input_test12:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test12:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2366,12 +2366,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test13(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_undef_input_test13:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test13:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2381,12 +2381,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test14(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: combine_undef_input_test14:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test14:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2396,24 +2396,24 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_undef_input_test15:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test15:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test15:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test15:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
@@ -2433,7 +2433,7 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test16(<4 x float> %a) {
 ; ALL-LABEL: combine_undef_input_test16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
   %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
@@ -2442,22 +2442,22 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test17(<4 x float> %a) {
 ; SSE2-LABEL: combine_undef_input_test17:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test17:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test17:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test17:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
@@ -2467,22 +2467,22 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test18(<4 x float> %a) {
 ; SSE2-LABEL: combine_undef_input_test18:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test18:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_undef_input_test18:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test18:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
@@ -2492,12 +2492,12 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test19(<4 x float> %a) {
 ; SSE-LABEL: combine_undef_input_test19:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_input_test19:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
@@ -2507,7 +2507,7 @@ define <4 x float> @combine_undef_input_
 
 define <4 x float> @combine_undef_input_test20(<4 x float> %a) {
 ; ALL-LABEL: combine_undef_input_test20:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
   %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
@@ -2521,14 +2521,14 @@ define <4 x float> @combine_undef_input_
 
 define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
 ; SSE-LABEL: combine_unneeded_subvector1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,2,1,0]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_unneeded_subvector1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2537,7 +2537,7 @@ define <8 x i32> @combine_unneeded_subve
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_unneeded_subvector1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -2549,14 +2549,14 @@ define <8 x i32> @combine_unneeded_subve
 
 define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: combine_unneeded_subvector2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[3,2,1,0]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,2,1,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_unneeded_subvector2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2565,7 +2565,7 @@ define <8 x i32> @combine_unneeded_subve
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_unneeded_subvector2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
@@ -2577,26 +2577,26 @@ define <8 x i32> @combine_unneeded_subve
 
 define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_insertps1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_insertps1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_insertps1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_insertps1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
 ; AVX-NEXT:    retq
 
@@ -2607,26 +2607,26 @@ define <4 x float> @combine_insertps1(<4
 
 define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_insertps2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_insertps2:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_insertps2:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_insertps2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
 ; AVX-NEXT:    retq
 
@@ -2637,24 +2637,24 @@ define <4 x float> @combine_insertps2(<4
 
 define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_insertps3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_insertps3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_insertps3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_insertps3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; AVX-NEXT:    retq
 
@@ -2665,24 +2665,24 @@ define <4 x float> @combine_insertps3(<4
 
 define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_insertps4:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_insertps4:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_insertps4:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_insertps4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX-NEXT:    retq
 
@@ -2693,13 +2693,13 @@ define <4 x float> @combine_insertps4(<4
 
 define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>* %a1) {
 ; SSE-LABEL: combine_scalar_load_with_blend_with_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movaps %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovaps %xmm0, (%rsi)
 ; AVX-NEXT:    retq
@@ -2715,26 +2715,26 @@ define void @combine_scalar_load_with_bl
 ; PR30371
 define <4 x float> @combine_constant_insertion_v4f32(float %f) {
 ; SSE2-LABEL: combine_constant_insertion_v4f32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm1 = <u,4,5,3>
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_constant_insertion_v4f32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movaps {{.*#+}} xmm1 = <u,4,5,3>
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_constant_insertion_v4f32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_constant_insertion_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
 ; AVX-NEXT:    retq
   %a0 = insertelement <4 x float> undef, float %f, i32 0
@@ -2744,33 +2744,33 @@ define <4 x float> @combine_constant_ins
 
 define <4 x i32> @combine_constant_insertion_v4i32(i32 %f) {
 ; SSE2-LABEL: combine_constant_insertion_v4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm1
 ; SSE2-NEXT:    movaps {{.*#+}} xmm0 = <u,4,5,30>
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: combine_constant_insertion_v4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movd %edi, %xmm1
 ; SSSE3-NEXT:    movaps {{.*#+}} xmm0 = <u,4,5,30>
 ; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_constant_insertion_v4i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movd %edi, %xmm0
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3,4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_constant_insertion_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_constant_insertion_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %edi, %xmm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
 ; AVX2-NEXT:    retq
@@ -2781,7 +2781,7 @@ define <4 x i32> @combine_constant_inser
 
 define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: PR22377:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps %xmm0, %xmm1
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
@@ -2790,7 +2790,7 @@ define <4 x float> @PR22377(<4 x float>
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: PR22377:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm1
@@ -2806,7 +2806,7 @@ entry:
 
 define <4 x float> @PR22390(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: PR22390:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
 ; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -2815,7 +2815,7 @@ define <4 x float> @PR22390(<4 x float>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: PR22390:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
 ; SSSE3-NEXT:    movaps %xmm0, %xmm2
 ; SSSE3-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -2824,14 +2824,14 @@ define <4 x float> @PR22390(<4 x float>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: PR22390:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
 ; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT:    addps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: PR22390:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,0,1,2]
 ; AVX-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
@@ -2845,7 +2845,7 @@ entry:
 
 define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
 ; SSE2-LABEL: PR22412:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
 ; SSE2-NEXT:    movapd %xmm2, %xmm0
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
@@ -2854,7 +2854,7 @@ define <8 x float> @PR22412(<8 x float>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: PR22412:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
 ; SSSE3-NEXT:    movapd %xmm2, %xmm0
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
@@ -2863,7 +2863,7 @@ define <8 x float> @PR22412(<8 x float>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: PR22412:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
 ; SSE41-NEXT:    movapd %xmm0, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[3,2]
@@ -2873,14 +2873,14 @@ define <8 x float> @PR22412(<8 x float>
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: PR22412:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
 ; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[3,2],ymm0[5,4],ymm1[7,6]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR22412:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
@@ -2893,7 +2893,7 @@ entry:
 
 define <4 x float> @PR30264(<4 x float> %x) {
 ; SSE2-LABEL: PR30264:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],mem[2,3]
@@ -2901,7 +2901,7 @@ define <4 x float> @PR30264(<4 x float>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: PR30264:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],mem[2,3]
@@ -2909,14 +2909,14 @@ define <4 x float> @PR30264(<4 x float>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: PR30264:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movaps {{.*#+}} xmm1 = <u,u,4,1>
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm0[0],zero,xmm1[2,3]
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: PR30264:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = <u,u,4,1>
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2,3]
 ; AVX-NEXT:    retq
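
Every hunk in these test files applies the same mechanical substitution,
rewriting "# BB#N:" to "# %bb.N:" in the autogenerated FileCheck lines. As a
minimal sketch, the textual rewrite amounts to the following standalone
Python helper; this is illustrative only, since the checks in the commit were
presumably regenerated (e.g. with utils/update_llc_test_checks.py) after the
block-label printing changed.

    import re
    import sys

    # Matches the old-style basic-block comment emitted by the AsmPrinter,
    # e.g. "# BB#0:" or "# BB#12:", capturing the block number.
    OLD_BB_REF = re.compile(r"# BB#(\d+):")

    def rewrite(text):
        # Produce the new MIR-style form, e.g. "# %bb.0:".
        return OLD_BB_REF.sub(r"# %bb.\1:", text)

    if __name__ == "__main__":
        # Rewrite each test file named on the command line in place.
        for path in sys.argv[1:]:
            with open(path) as f:
                src = f.read()
            with open(path, "w") as f:
                f.write(rewrite(src))

Running it over a test would turn a line such as "; ALL: # BB#0: # %entry"
into "; ALL: # %bb.0: # %entry", matching the hunks above.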

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-masked.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-masked.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-masked.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-masked.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v4i32_1234:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} xmm2 {%k1} = xmm0[1,2,3],xmm1[0]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
@@ -17,7 +17,7 @@ define <4 x i32> @mask_shuffle_v4i32_123
 
 define <4 x i32> @maskz_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v4i32_1234:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3],xmm1[0]
 ; CHECK-NEXT:    retq
@@ -30,7 +30,7 @@ define <4 x i32> @maskz_shuffle_v4i32_12
 
 define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v4i32_2345:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} xmm2 {%k1} = xmm0[2,3],xmm1[0,1]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
@@ -44,7 +44,7 @@ define <4 x i32> @mask_shuffle_v4i32_234
 
 define <4 x i32> @maskz_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v4i32_2345:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3],xmm1[0,1]
 ; CHECK-NEXT:    retq
@@ -57,7 +57,7 @@ define <4 x i32> @maskz_shuffle_v4i32_23
 
 define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v2i64_12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignq {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
@@ -71,7 +71,7 @@ define <2 x i64> @mask_shuffle_v2i64_12(
 
 define <2 x i64> @maskz_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v2i64_12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignq {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0]
 ; CHECK-NEXT:    retq
@@ -84,7 +84,7 @@ define <2 x i64> @maskz_shuffle_v2i64_12
 
 define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v4i64_1234:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignq {{.*#+}} ymm2 {%k1} = ymm0[1,2,3],ymm1[0]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0
@@ -98,7 +98,7 @@ define <4 x i64> @mask_shuffle_v4i64_123
 
 define <4 x i64> @maskz_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v4i64_1234:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3],ymm1[0]
 ; CHECK-NEXT:    retq
@@ -111,7 +111,7 @@ define <4 x i64> @maskz_shuffle_v4i64_12
 
 define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v4i64_1230:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,0]
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -125,7 +125,7 @@ define <4 x i64> @mask_shuffle_v4i64_123
 
 define <4 x i64> @maskz_shuffle_v4i64_1230(<4 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v4i64_1230:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,0]
 ; CHECK-NEXT:    retq
@@ -138,7 +138,7 @@ define <4 x i64> @maskz_shuffle_v4i64_12
 
 define <8 x i32> @mask_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v8i32_12345678:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} ymm2 {%k1} = ymm0[1,2,3,4,5,6,7],ymm1[0]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0
@@ -151,7 +151,7 @@ define <8 x i32> @mask_shuffle_v8i32_123
 
 define <8 x i32> @maskz_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v8i32_12345678:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7],ymm1[0]
 ; CHECK-NEXT:    retq
@@ -163,7 +163,7 @@ define <8 x i32> @maskz_shuffle_v8i32_12
 
 define <8 x i32> @mask_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v8i32_23456789:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} ymm2 {%k1} = ymm0[2,3,4,5,6,7],ymm1[0,1]
 ; CHECK-NEXT:    vmovdqa %ymm2, %ymm0
@@ -176,7 +176,7 @@ define <8 x i32> @mask_shuffle_v8i32_234
 
 define <8 x i32> @maskz_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v8i32_23456789:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,4,5,6,7],ymm1[0,1]
 ; CHECK-NEXT:    retq
@@ -188,7 +188,7 @@ define <8 x i32> @maskz_shuffle_v8i32_23
 
 define <8 x i32> @mask_shuffle_v8i32_12345670(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v8i32_12345670:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,4,5,6,7,0]
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -201,7 +201,7 @@ define <8 x i32> @mask_shuffle_v8i32_123
 
 define <8 x i32> @maskz_shuffle_v8i32_12345670(<8 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v8i32_12345670:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7,0]
 ; CHECK-NEXT:    retq
@@ -213,7 +213,7 @@ define <8 x i32> @maskz_shuffle_v8i32_12
 
 define <8 x i32> @mask_shuffle_v8i32_23456701(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_shuffle_v8i32_23456701:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
@@ -226,7 +226,7 @@ define <8 x i32> @mask_shuffle_v8i32_234
 
 define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v8i32_23456701:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -239,7 +239,7 @@ define <8 x i32> @maskz_shuffle_v8i32_23
 
 define <4 x i32> @mask_extract_v8i32_v4i32_0(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i32_v4i32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -253,7 +253,7 @@ define <4 x i32> @mask_extract_v8i32_v4i
 
 define <4 x i32> @mask_extract_v8i32_v4i32_0_z(<8 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i32_v4i32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -267,7 +267,7 @@ define <4 x i32> @mask_extract_v8i32_v4i
 
 define <4 x i32> @mask_extract_v8i32_v4i32_1(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i32_v4i32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $1, %ymm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -282,7 +282,7 @@ define <4 x i32> @mask_extract_v8i32_v4i
 
 define <4 x i32> @mask_extract_v8i32_v4i32_1_z(<8 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i32_v4i32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -296,7 +296,7 @@ define <4 x i32> @mask_extract_v8i32_v4i
 
 define <4 x float> @mask_extract_v8f32_v4f32_0(<8 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f32_v4f32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -310,7 +310,7 @@ define <4 x float> @mask_extract_v8f32_v
 
 define <4 x float> @mask_extract_v8f32_v4f32_0_z(<8 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f32_v4f32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -324,7 +324,7 @@ define <4 x float> @mask_extract_v8f32_v
 
 define <4 x float> @mask_extract_v8f32_v4f32_1(<8 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f32_v4f32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $1, %ymm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -339,7 +339,7 @@ define <4 x float> @mask_extract_v8f32_v
 
 define <4 x float> @mask_extract_v8f32_v4f32_1_z(<8 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f32_v4f32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -353,7 +353,7 @@ define <4 x float> @mask_extract_v8f32_v
 
 define <2 x i64> @mask_extract_v4i64_v2i64_0(<4 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4i64_v2i64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -367,7 +367,7 @@ define <2 x i64> @mask_extract_v4i64_v2i
 
 define <2 x i64> @mask_extract_v4i64_v2i64_0_z(<4 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4i64_v2i64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -381,7 +381,7 @@ define <2 x i64> @mask_extract_v4i64_v2i
 
 define <2 x i64> @mask_extract_v4i64_v2i64_1(<4 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4i64_v2i64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $1, %ymm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -396,7 +396,7 @@ define <2 x i64> @mask_extract_v4i64_v2i
 
 define <2 x i64> @mask_extract_v4i64_v2i64_1_z(<4 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4i64_v2i64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $1, %ymm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -410,7 +410,7 @@ define <2 x i64> @mask_extract_v4i64_v2i
 
 define <2 x double> @mask_extract_v4f64_v2f64_0(<4 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4f64_v2f64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -424,7 +424,7 @@ define <2 x double> @mask_extract_v4f64_
 
 define <2 x double> @mask_extract_v4f64_v2f64_0_z(<4 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4f64_v2f64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -438,7 +438,7 @@ define <2 x double> @mask_extract_v4f64_
 
 define <2 x double> @mask_extract_v4f64_v2f64_1(<4 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4f64_v2f64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $1, %ymm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -453,7 +453,7 @@ define <2 x double> @mask_extract_v4f64_
 
 define <2 x double> @mask_extract_v4f64_v2f64_1_z(<4 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4f64_v2f64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -467,7 +467,7 @@ define <2 x double> @mask_extract_v4f64_
 
 define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -481,7 +481,7 @@ define <4 x i32> @mask_extract_v16i32_v4
 
 define <4 x i32> @mask_extract_v16i32_v4i32_0_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -495,7 +495,7 @@ define <4 x i32> @mask_extract_v16i32_v4
 
 define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -510,7 +510,7 @@ define <4 x i32> @mask_extract_v16i32_v4
 
 define <4 x i32> @mask_extract_v16i32_v4i32_1_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -524,7 +524,7 @@ define <4 x i32> @mask_extract_v16i32_v4
 
 define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $2, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -539,7 +539,7 @@ define <4 x i32> @mask_extract_v16i32_v4
 
 define <4 x i32> @mask_extract_v16i32_v4i32_3(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $3, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -554,7 +554,7 @@ define <4 x i32> @mask_extract_v16i32_v4
 
 define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -568,7 +568,7 @@ define <4 x float> @mask_extract_v16f32_
 
 define <4 x float> @mask_extract_v16f32_v4f32_0_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -582,7 +582,7 @@ define <4 x float> @mask_extract_v16f32_
 
 define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -597,7 +597,7 @@ define <4 x float> @mask_extract_v16f32_
 
 define <4 x float> @mask_extract_v16f32_v4f32_1_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -611,7 +611,7 @@ define <4 x float> @mask_extract_v16f32_
 
 define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -626,7 +626,7 @@ define <4 x float> @mask_extract_v16f32_
 
 define <4 x float> @mask_extract_v16f32_v4f32_3(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $3, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -641,7 +641,7 @@ define <4 x float> @mask_extract_v16f32_
 
 define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -653,7 +653,7 @@ define <8 x i32> @mask_extract_v16i32_v8
 
 define <8 x i32> @mask_extract_v16i32_v8i32_0_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -665,7 +665,7 @@ define <8 x i32> @mask_extract_v16i32_v8
 
 define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x8 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -678,7 +678,7 @@ define <8 x i32> @mask_extract_v16i32_v8
 
 define <8 x i32> @mask_extract_v16i32_v8i32_1_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x8 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -690,7 +690,7 @@ define <8 x i32> @mask_extract_v16i32_v8
 
 define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmps %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -702,7 +702,7 @@ define <8 x float> @mask_extract_v16f32_
 
 define <8 x float> @mask_extract_v16f32_v8f32_0_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovaps %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -714,7 +714,7 @@ define <8 x float> @mask_extract_v16f32_
 
 define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
@@ -727,7 +727,7 @@ define <8 x float> @mask_extract_v16f32_
 
 define <8 x float> @mask_extract_v16f32_v8f32_1_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -739,7 +739,7 @@ define <8 x float> @mask_extract_v16f32_
 
 define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -753,7 +753,7 @@ define <2 x i64> @mask_extract_v8i64_v2i
 
 define <2 x i64> @mask_extract_v8i64_v2i64_0_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -767,7 +767,7 @@ define <2 x i64> @mask_extract_v8i64_v2i
 
 define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -782,7 +782,7 @@ define <2 x i64> @mask_extract_v8i64_v2i
 
 define <2 x i64> @mask_extract_v8i64_v2i64_1_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -796,7 +796,7 @@ define <2 x i64> @mask_extract_v8i64_v2i
 
 define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $2, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -811,7 +811,7 @@ define <2 x i64> @mask_extract_v8i64_v2i
 
 define <2 x i64> @mask_extract_v8i64_v2i64_3(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $3, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -826,7 +826,7 @@ define <2 x i64> @mask_extract_v8i64_v2i
 
 define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -840,7 +840,7 @@ define <2 x double> @mask_extract_v8f64_
 
 define <2 x double> @mask_extract_v8f64_v2f64_0_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -854,7 +854,7 @@ define <2 x double> @mask_extract_v8f64_
 
 define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -869,7 +869,7 @@ define <2 x double> @mask_extract_v8f64_
 
 define <2 x double> @mask_extract_v8f64_v2f64_1_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -883,7 +883,7 @@ define <2 x double> @mask_extract_v8f64_
 
 define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $2, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -898,7 +898,7 @@ define <2 x double> @mask_extract_v8f64_
 
 define <2 x double> @mask_extract_v8f64_v2f64_3(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $3, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -913,7 +913,7 @@ define <2 x double> @mask_extract_v8f64_
 
 define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -926,7 +926,7 @@ define <4 x i64> @mask_extract_v8i64_v4i
 
 define <4 x i64> @mask_extract_v8i64_v4i64_0_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -939,7 +939,7 @@ define <4 x i64> @mask_extract_v8i64_v4i
 
 define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -953,7 +953,7 @@ define <4 x i64> @mask_extract_v8i64_v4i
 
 define <4 x i64> @mask_extract_v8i64_v4i64_1_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x4 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -966,7 +966,7 @@ define <4 x i64> @mask_extract_v8i64_v4i
 
 define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -979,7 +979,7 @@ define <4 x double> @mask_extract_v8f64_
 
 define <4 x double> @mask_extract_v8f64_v4f64_0_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovapd %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -992,7 +992,7 @@ define <4 x double> @mask_extract_v8f64_
 
 define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm0
@@ -1006,7 +1006,7 @@ define <4 x double> @mask_extract_v8f64_
 
 define <4 x double> @mask_extract_v8f64_v4f64_1_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1019,7 +1019,7 @@ define <4 x double> @mask_extract_v8f64_
 
 define <8 x i32> @mask_cast_extract_v8i64_v8i32_0(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1032,7 +1032,7 @@ define <8 x i32> @mask_cast_extract_v8i6
 
 define <8 x i32> @mask_cast_extract_v8i64_v8i32_0_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1045,7 +1045,7 @@ define <8 x i32> @mask_cast_extract_v8i6
 
 define <8 x i32> @mask_cast_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x8 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -1059,7 +1059,7 @@ define <8 x i32> @mask_cast_extract_v8i6
 
 define <8 x i32> @mask_cast_extract_v8i64_v8i32_1_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x8 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1072,7 +1072,7 @@ define <8 x i32> @mask_cast_extract_v8i6
 
 define <8 x float> @mask_cast_extract_v8f64_v8f32_0(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmps %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1085,7 +1085,7 @@ define <8 x float> @mask_cast_extract_v8
 
 define <8 x float> @mask_cast_extract_v8f64_v8f32_0_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovaps %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1098,7 +1098,7 @@ define <8 x float> @mask_cast_extract_v8
 
 define <8 x float> @mask_cast_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
@@ -1112,7 +1112,7 @@ define <8 x float> @mask_cast_extract_v8
 
 define <8 x float> @mask_cast_extract_v8f64_v8f32_1_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1125,7 +1125,7 @@ define <8 x float> @mask_cast_extract_v8
 
 define <4 x i32> @mask_cast_extract_v8i64_v4i32_0(<8 x i64> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -1140,7 +1140,7 @@ define <4 x i32> @mask_cast_extract_v8i6
 
 define <4 x i32> @mask_cast_extract_v8i64_v4i32_0_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1155,7 +1155,7 @@ define <4 x i32> @mask_cast_extract_v8i6
 
 define <4 x i32> @mask_cast_extract_v8i64_v4i32_1(<8 x i64> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -1171,7 +1171,7 @@ define <4 x i32> @mask_cast_extract_v8i6
 
 define <4 x i32> @mask_cast_extract_v8i64_v4i32_1_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti32x4 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1186,7 +1186,7 @@ define <4 x i32> @mask_cast_extract_v8i6
 
 define <4 x float> @mask_cast_extract_v8f64_v4f32_0(<8 x double> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -1201,7 +1201,7 @@ define <4 x float> @mask_cast_extract_v8
 
 define <4 x float> @mask_cast_extract_v8f64_v4f32_0_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1216,7 +1216,7 @@ define <4 x float> @mask_cast_extract_v8
 
 define <4 x float> @mask_cast_extract_v8f64_v4f32_1(<8 x double> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -1232,7 +1232,7 @@ define <4 x float> @mask_cast_extract_v8
 
 define <4 x float> @mask_cast_extract_v8f64_v4f32_1_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1247,7 +1247,7 @@ define <4 x float> @mask_cast_extract_v8
 
 define <4 x i64> @mask_cast_extract_v16i32_v4i64_0(<16 x i32> %a, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1261,7 +1261,7 @@ define <4 x i64> @mask_cast_extract_v16i
 
 define <4 x i64> @mask_cast_extract_v16i32_v4i64_0_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1275,7 +1275,7 @@ define <4 x i64> @mask_cast_extract_v16i
 
 define <4 x i64> @mask_cast_extract_v16i32_v4i64_1(<16 x i32> %a, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -1290,7 +1290,7 @@ define <4 x i64> @mask_cast_extract_v16i
 
 define <4 x i64> @mask_cast_extract_v16i32_v4i64_1_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x4 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1304,7 +1304,7 @@ define <4 x i64> @mask_cast_extract_v16i
 
 define <4 x double> @mask_cast_extract_v16f32_v4f64_0(<16 x float> %a, <4 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1318,7 +1318,7 @@ define <4 x double> @mask_cast_extract_v
 
 define <4 x double> @mask_cast_extract_v16f32_v4f64_0_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovapd %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1332,7 +1332,7 @@ define <4 x double> @mask_cast_extract_v
 
 define <4 x double> @mask_cast_extract_v16f32_v4f64_1(<16 x float> %a, <4 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm0
@@ -1347,7 +1347,7 @@ define <4 x double> @mask_cast_extract_v
 
 define <4 x double> @mask_cast_extract_v16f32_v4f64_1_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1361,7 +1361,7 @@ define <4 x double> @mask_cast_extract_v
 
 define <2 x i64> @mask_cast_extract_v16i32_v2i64_0(<16 x i32> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -1376,7 +1376,7 @@ define <2 x i64> @mask_cast_extract_v16i
 
 define <2 x i64> @mask_cast_extract_v16i32_v2i64_0_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1391,7 +1391,7 @@ define <2 x i64> @mask_cast_extract_v16i
 
 define <2 x i64> @mask_cast_extract_v16i32_v2i64_1(<16 x i32> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -1407,7 +1407,7 @@ define <2 x i64> @mask_cast_extract_v16i
 
 define <2 x i64> @mask_cast_extract_v16i32_v2i64_1_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextracti64x2 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1422,7 +1422,7 @@ define <2 x i64> @mask_cast_extract_v16i
 
 define <2 x double> @mask_cast_extract_v16f32_v2f64_0(<16 x float> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -1437,7 +1437,7 @@ define <2 x double> @mask_cast_extract_v
 
 define <2 x double> @mask_cast_extract_v16f32_v2f64_0_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_0_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1452,7 +1452,7 @@ define <2 x double> @mask_cast_extract_v
 
 define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -1468,7 +1468,7 @@ define <2 x double> @mask_cast_extract_v
 
 define <2 x double> @mask_cast_extract_v16f32_v2f64_1_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_1_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vzeroupper
@@ -1483,7 +1483,7 @@ define <2 x double> @mask_cast_extract_v
 
 define <2 x double> @broadcast_v4f32_0101_from_v2f32_mask(double* %x, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_mask:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
 ; CHECK-NEXT:    retq
@@ -1498,7 +1498,7 @@ define <2 x double> @broadcast_v4f32_010
 
 define <2 x double> @broadcast_v4f32_0101_from_v2f32_maskz(double* %x, i8 %mask) {
 ; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_maskz:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
 ; CHECK-NEXT:    retq
@@ -1513,7 +1513,7 @@ define <2 x double> @broadcast_v4f32_010
 
 define <8 x float> @test_broadcast_2f64_8f32(<2 x double> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_2f64_8f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1527,7 +1527,7 @@ define <8 x float> @test_broadcast_2f64_
 
 define <8 x i32> @test_broadcast_2i64_8i32(<2 x i64> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_2i64_8i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1541,7 +1541,7 @@ define <8 x i32> @test_broadcast_2i64_8i
 
 define <16 x float> @test_broadcast_2f64_16f32(<2 x double> *%p, i16 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_2f64_16f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1555,7 +1555,7 @@ define <16 x float> @test_broadcast_2f64
 
 define <16 x i32> @test_broadcast_2i64_16i32(<2 x i64> *%p, i16 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_2i64_16i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1569,7 +1569,7 @@ define <16 x i32> @test_broadcast_2i64_1
 
 define <16 x float> @test_broadcast_4f64_16f32(<4 x double> *%p, i16 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_4f64_16f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -1583,7 +1583,7 @@ define <16 x float> @test_broadcast_4f64
 
 define <16 x i32> @test_broadcast_4i64_16i32(<4 x i64> *%p, i16 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_4i64_16i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -1597,7 +1597,7 @@ define <16 x i32> @test_broadcast_4i64_1
 
 define <4 x double> @test_broadcast_4f32_4f64(<4 x float> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_4f32_4f64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1612,7 +1612,7 @@ define <4 x double> @test_broadcast_4f32
 
 define <4 x i64> @test_broadcast_4i32_4i64(<4 x i32> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_4i32_4i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1627,7 +1627,7 @@ define <4 x i64> @test_broadcast_4i32_4i
 
 define <8 x double> @test_broadcast_4f32_8f64(<4 x float> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_4f32_8f64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1641,7 +1641,7 @@ define <8 x double> @test_broadcast_4f32
 
 define <8 x i64> @test_broadcast_4i32_8i64(<4 x i32> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_4i32_8i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1655,7 +1655,7 @@ define <8 x i64> @test_broadcast_4i32_8i
 
 define <8 x double> @test_broadcast_8f32_8f64(<8 x float> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_8f32_8f64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1669,7 +1669,7 @@ define <8 x double> @test_broadcast_8f32
 
 define <8 x i64> @test_broadcast_8i32_8i64(<8 x i32> *%p, i8 %mask) nounwind {
 ; CHECK-LABEL: test_broadcast_8i32_8i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1683,7 +1683,7 @@ define <8 x i64> @test_broadcast_8i32_8i
 
 define <4 x float> @test_broadcastf32x2_v4f32(<4 x float> %vec, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_broadcastf32x2_v4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}
@@ -1697,7 +1697,7 @@ define <4 x float> @test_broadcastf32x2_
 
 define <4 x float> @test_broadcastf32x2_v4f32_z(<4 x float> %vec, i8 %mask) {
 ; CHECK-LABEL: test_broadcastf32x2_v4f32_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm0 {%k1} {z}
@@ -1711,7 +1711,7 @@ define <4 x float> @test_broadcastf32x2_
 
 define <4 x i32> @test_broadcasti32x2_v4i32(<4 x i32> %vec, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_broadcasti32x2_v4i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcasti32x2 {{.*#+}} xmm1 {%k1} = xmm0[0,1,0,1]
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
@@ -1725,7 +1725,7 @@ define <4 x i32> @test_broadcasti32x2_v4
 
 define <4 x i32> @test_broadcasti32x2_v4i32_z(<4 x i32> %vec, i8 %mask) {
 ; CHECK-LABEL: test_broadcasti32x2_v4i32_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcasti32x2 {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1738,7 +1738,7 @@ define <4 x i32> @test_broadcasti32x2_v4
 
 define <8 x float> @test_broadcastf32x2_v8f32(<8 x float> %vec, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_broadcastf32x2_v8f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm0
@@ -1751,7 +1751,7 @@ define <8 x float> @test_broadcastf32x2_
 
 define <8 x float> @test_broadcastf32x2_v8f32_z(<8 x float> %vec, i8 %mask) {
 ; CHECK-LABEL: test_broadcastf32x2_v8f32_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1763,7 +1763,7 @@ define <8 x float> @test_broadcastf32x2_
 
 define <8 x i32> @test_broadcasti32x2_v8i32(<8 x i32> %vec, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_broadcasti32x2_v8i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
@@ -1776,7 +1776,7 @@ define <8 x i32> @test_broadcasti32x2_v8
 
 define <8 x i32> @test_broadcasti32x2_v8i32_z(<8 x i32> %vec, i8 %mask) {
 ; CHECK-LABEL: test_broadcasti32x2_v8i32_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1788,7 +1788,7 @@ define <8 x i32> @test_broadcasti32x2_v8
 
 define <16 x float> @test_broadcastf32x2_v16f32_z(<16 x float> %vec, i16 %mask) {
 ; CHECK-LABEL: test_broadcastf32x2_v16f32_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1800,7 +1800,7 @@ define <16 x float> @test_broadcastf32x2
 
 define <16 x i32> @test_broadcasti32x2_v16i32(<16 x i32> %vec, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_broadcasti32x2_v16i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1813,7 +1813,7 @@ define <16 x i32> @test_broadcasti32x2_v
 
 define <16 x float> @test_broadcastf32x2_v16f32(<16 x float> %vec, <16 x float> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_broadcastf32x2_v16f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    vmovapd %zmm1, %zmm0
@@ -1826,7 +1826,7 @@ define <16 x float> @test_broadcastf32x2
 
 define <16 x i32> @test_broadcasti32x2_v16i32_z(<16 x i32> %vec, i16 %mask) {
 ; CHECK-LABEL: test_broadcasti32x2_v16i32_z:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
 ; CHECK-NEXT:    retq
@@ -1838,7 +1838,7 @@ define <16 x i32> @test_broadcasti32x2_v
 
 define <16 x i8> @mask_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passthru, i16 %mask) {
 ; CHECK-LABEL: mask_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpalignr {{.*#+}} xmm2 {%k1} = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
@@ -1851,7 +1851,7 @@ define <16 x i8> @mask_shuffle_v16i8_1_2
 
 define <16 x i8> @maskz_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v16i8_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpalignr {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
 ; CHECK-NEXT:    retq
@@ -1863,7 +1863,7 @@ define <16 x i8> @maskz_shuffle_v16i8_1_
 
 define <16 x i8> @mask_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passthru, i16 %mask) {
 ; CHECK-LABEL: mask_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpalignr {{.*#+}} xmm2 {%k1} = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
@@ -1876,7 +1876,7 @@ define <16 x i8> @mask_shuffle_v16i8_4_5
 
 define <16 x i8> @maskz_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v16i8_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpalignr {{.*#+}} xmm0 {%k1} {z} = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -1888,7 +1888,7 @@ define <16 x i8> @maskz_shuffle_v16i8_4_
 
 define <16 x i8> @mask_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passthru, i16 %mask) {
 ; CHECK-LABEL: mask_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpalignr {{.*#+}} xmm2 {%k1} = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
@@ -1901,7 +1901,7 @@ define <16 x i8> @mask_shuffle_v16i8_8_9
 
 define <16 x i8> @maskz_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
 ; CHECK-LABEL: maskz_shuffle_v16i8_8_9_10_11_12_13_14_15_16_17_18_19_20_21_22_23:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpalignr {{.*#+}} xmm0 {%k1} {z} = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT:    retq

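The churn above is mechanical: every autogenerated basic-block label of the form BB#N in the FileCheck lines becomes %bb.N, and nothing else in the assertions changes. These expectations are normally regenerated with utils/update_llc_test_checks.py rather than edited by hand; purely as an illustrative sketch (the standalone script, its file arguments, and the regex are assumptions for illustration, not part of this patch), the equivalent textual rewrite is:

    import re
    import sys

    # Rewrite autogenerated FileCheck block labels "BB#N" -> "%bb.N".
    # Only the label itself is touched, so both "# BB#0:" and the
    # Darwin-style "## BB#0:" comment forms are covered.
    LABEL = re.compile(r"BB#(\d+)")

    for path in sys.argv[1:]:
        with open(path) as f:
            text = f.read()
        with open(path, "w") as f:
            f.write(LABEL.sub(r"%bb.\1", text))

Run over the affected tests (e.g. test/CodeGen/X86/vector-shuffle-*.ll), this reproduces the - / + pairs shown in these hunks.
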
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @test0(<1 x i64>* %x) {
 ; X32-LABEL: test0:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -14,7 +14,7 @@ define void @test0(<1 x i64>* %x) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test0:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-NEXT:    movq %xmm0, (%rdi)
@@ -30,7 +30,7 @@ entry:
 
 define void @test1() {
 ; X32-LABEL: test1:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    subl $16, %esp
@@ -49,7 +49,7 @@ define void @test1() {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test1:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movlps %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movq -{{[0-9]+}}(%rsp), %mm0
@@ -75,7 +75,7 @@ entry:
 
 define void @test2() nounwind {
 ; X32-LABEL: test2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl L_tmp_V2i$non_lazy_ptr, %eax
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -83,7 +83,7 @@ define void @test2() nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movq _tmp_V2i@{{.*}}(%rip), %rax
 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-sse1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-sse1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-sse1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-sse1.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0001:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,1]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -12,7 +12,7 @@ define <4 x float> @shuffle_v4f32_0001(<
 
 define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0020:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,2,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -21,7 +21,7 @@ define <4 x float> @shuffle_v4f32_0020(<
 
 define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0300:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3,0,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -30,7 +30,7 @@ define <4 x float> @shuffle_v4f32_0300(<
 
 define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_1000:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0,0,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -39,7 +39,7 @@ define <4 x float> @shuffle_v4f32_1000(<
 
 define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_2200:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,2,0,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -48,7 +48,7 @@ define <4 x float> @shuffle_v4f32_2200(<
 
 define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_3330:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -57,7 +57,7 @@ define <4 x float> @shuffle_v4f32_3330(<
 
 define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_3210:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -66,7 +66,7 @@ define <4 x float> @shuffle_v4f32_3210(<
 
 define <4 x float> @shuffle_v4f32_0011(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0011:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
@@ -75,7 +75,7 @@ define <4 x float> @shuffle_v4f32_0011(<
 
 define <4 x float> @shuffle_v4f32_2233(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_2233:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
@@ -84,7 +84,7 @@ define <4 x float> @shuffle_v4f32_2233(<
 
 define <4 x float> @shuffle_v4f32_0022(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0022:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,2,2]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -93,7 +93,7 @@ define <4 x float> @shuffle_v4f32_0022(<
 
 define <4 x float> @shuffle_v4f32_1133(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_1133:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -102,7 +102,7 @@ define <4 x float> @shuffle_v4f32_1133(<
 
 define <4 x float> @shuffle_v4f32_0145(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0145:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -111,7 +111,7 @@ define <4 x float> @shuffle_v4f32_0145(<
 
 define <4 x float> @shuffle_v4f32_0101(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_0101:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -120,7 +120,7 @@ define <4 x float> @shuffle_v4f32_0101(<
 
 define <4 x float> @shuffle_v4f32_2323(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_2323:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
@@ -129,7 +129,7 @@ define <4 x float> @shuffle_v4f32_2323(<
 
 define <4 x float> @shuffle_v4f32_6723(<4 x float> %a, <4 x float> %b) {
 ; SSE1-LABEL: shuffle_v4f32_6723:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; SSE1-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -138,7 +138,7 @@ define <4 x float> @shuffle_v4f32_6723(<
 
 define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
 ; SSE1-LABEL: shuffle_v4f32_4zzz:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE1-NEXT:    movaps %xmm1, %xmm0
@@ -149,7 +149,7 @@ define <4 x float> @shuffle_v4f32_4zzz(<
 
 define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
 ; SSE1-LABEL: shuffle_v4f32_z4zz:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
@@ -160,7 +160,7 @@ define <4 x float> @shuffle_v4f32_z4zz(<
 
 define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
 ; SSE1-LABEL: shuffle_v4f32_zz4z:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
 ; SSE1-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -172,7 +172,7 @@ define <4 x float> @shuffle_v4f32_zz4z(<
 
 define <4 x float> @shuffle_v4f32_zuu4(<4 x float> %a) {
 ; SSE1-LABEL: shuffle_v4f32_zuu4:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
 ; SSE1-NEXT:    movaps %xmm1, %xmm0
@@ -183,7 +183,7 @@ define <4 x float> @shuffle_v4f32_zuu4(<
 
 define <4 x float> @shuffle_v4f32_zzz7(<4 x float> %a) {
 ; SSE1-LABEL: shuffle_v4f32_zzz7:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[2,0]
 ; SSE1-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
@@ -195,7 +195,7 @@ define <4 x float> @shuffle_v4f32_zzz7(<
 
 define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
 ; SSE1-LABEL: shuffle_v4f32_z6zz:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,0]
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
@@ -206,7 +206,7 @@ define <4 x float> @shuffle_v4f32_z6zz(<
 
 define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
 ; SSE1-LABEL: insert_reg_and_zero_v4f32:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    xorps %xmm1, %xmm1
 ; SSE1-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE1-NEXT:    movaps %xmm1, %xmm0
@@ -218,7 +218,7 @@ define <4 x float> @insert_reg_and_zero_
 
 define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
 ; SSE1-LABEL: insert_mem_and_zero_v4f32:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE1-NEXT:    retq
   %a = load float, float* %ptr
@@ -229,7 +229,7 @@ define <4 x float> @insert_mem_and_zero_
 
 define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
 ; SSE1-LABEL: insert_mem_lo_v4f32:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movq (%rdi), %rax
 ; SSE1-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
 ; SSE1-NEXT:    shrq $32, %rax
@@ -250,7 +250,7 @@ define <4 x float> @insert_mem_lo_v4f32(
 
 define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
 ; SSE1-LABEL: insert_mem_hi_v4f32:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movq (%rdi), %rax
 ; SSE1-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
 ; SSE1-NEXT:    shrq $32, %rax
@@ -270,7 +270,7 @@ define <4 x float> @insert_mem_hi_v4f32(
 
 define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
 ; SSE1-LABEL: shuffle_mem_v4f32_3210:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movaps (%rdi), %xmm0
 ; SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; SSE1-NEXT:    retq
@@ -281,7 +281,7 @@ define <4 x float> @shuffle_mem_v4f32_32
 
 define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
 ; SSE1-LABEL: shuffle_mem_v4f32_0145:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; SSE1-NEXT:    retq
   %b = load <4 x float>, <4 x float>* %pb, align 16
@@ -291,7 +291,7 @@ define <4 x float> @shuffle_mem_v4f32_01
 
 define <4 x float> @shuffle_mem_v4f32_6723(<4 x float> %a, <4 x float>* %pb) {
 ; SSE1-LABEL: shuffle_mem_v4f32_6723:
-; SSE1:       # BB#0:
+; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; SSE1-NEXT:    retq
   %b = load <4 x float>, <4 x float>* %pb, align 16

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-sse41.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-sse41.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-sse41.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define <8 x i16> @blend_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
 ; SSE41-LABEL: blend_packusdw:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    packusdw %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: blend_packusdw:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %p0 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
@@ -20,12 +20,12 @@ define <8 x i16> @blend_packusdw(<4 x i3
 
 define <16 x i8> @blend_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; SSE41-LABEL: blend_packuswb:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    packuswb %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: blend_packuswb:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %p0 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -36,14 +36,14 @@ define <16 x i8> @blend_packuswb(<8 x i1
 
 define <8 x i16> @blend_packusdw_packuswb(<4 x i32> %a0, <4 x i32> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; SSE41-LABEL: blend_packusdw_packuswb:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    packusdw %xmm1, %xmm0
 ; SSE41-NEXT:    packuswb %xmm3, %xmm2
 ; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: blend_packusdw_packuswb:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpackuswb %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 ; A length of zero is equivalent to a bit length of 64.
 define <2 x i64> @extrqi_len0_idx0(<2 x i64> %a) {
 ; ALL-LABEL: extrqi_len0_idx0:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 0, i8 0)
   ret <2 x i64> %1
@@ -18,7 +18,7 @@ define <2 x i64> @extrqi_len0_idx0(<2 x
 
 define <2 x i64> @extrqi_len8_idx16(<2 x i64> %a) {
 ; ALL-LABEL: extrqi_len8_idx16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 8, i8 16)
@@ -28,7 +28,7 @@ define <2 x i64> @extrqi_len8_idx16(<2 x
 ; If the length + index exceeds the bottom 64 bits the result is undefined.
 define <2 x i64> @extrqi_len32_idx48(<2 x i64> %a) {
 ; ALL-LABEL: extrqi_len32_idx48:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 32, i8 48)
@@ -37,17 +37,17 @@ define <2 x i64> @extrqi_len32_idx48(<2
 
 define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(<16 x i8> %a0) {
 ; AMD10H-LABEL: shuf_0zzzuuuuuuuuuuuu:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_0zzzuuuuuuuuuuuu:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_0zzzuuuuuuuuuuuu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; BTVER2-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -56,7 +56,7 @@ define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(
 
 define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
 ; AMD10H-LABEL: shuf_0zzzzzzz1zzzzzzz:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    movdqa %xmm0, %xmm1
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm1 = xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -64,12 +64,12 @@ define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_0zzzzzzz1zzzzzzz:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_0zzzzzzz1zzzzzzz:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; BTVER2-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -78,7 +78,7 @@ define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(
 
 define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
 ; AMD10H-LABEL: shuf_2zzzzzzz3zzzzzzz:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    movdqa %xmm0, %xmm1
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm1 = xmm1[3],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -86,12 +86,12 @@ define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_2zzzzzzz3zzzzzzz:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_2zzzzzzz3zzzzzzz:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; BTVER2-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; BTVER2-NEXT:    retq
@@ -101,17 +101,17 @@ define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(
 
 define <16 x i8> @shuf_01zzuuuuuuuuuuuu(<16 x i8> %a0) {
 ; AMD10H-LABEL: shuf_01zzuuuuuuuuuuuu:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_01zzuuuuuuuuuuuu:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_01zzuuuuuuuuuuuu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; BTVER2-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -120,7 +120,7 @@ define <16 x i8> @shuf_01zzuuuuuuuuuuuu(
 
 define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
 ; AMD10H-LABEL: shuf_01zzzzzz23zzzzzz:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    movdqa %xmm0, %xmm1
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -128,12 +128,12 @@ define <16 x i8> @shuf_01zzzzzz23zzzzzz(
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_01zzzzzz23zzzzzz:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero,zero,zero,zero,zero
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_01zzzzzz23zzzzzz:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; BTVER2-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 2, i32 3, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -142,7 +142,7 @@ define <16 x i8> @shuf_01zzzzzz23zzzzzz(
 
 define <16 x i8> @shuf_1zzzuuuuuuuuuuuu(<16 x i8> %a0) {
 ; ALL-LABEL: shuf_1zzzuuuuuuuuuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -151,7 +151,7 @@ define <16 x i8> @shuf_1zzzuuuuuuuuuuuu(
 
 define <8 x i16> @shuf_1zzzuuuu(<8 x i16> %a0) {
 ; ALL-LABEL: shuf_1zzzuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 8, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -160,7 +160,7 @@ define <8 x i16> @shuf_1zzzuuuu(<8 x i16
 
 define <8 x i16> @shuf_12zzuuuu(<8 x i16> %a0) {
 ; ALL-LABEL: shuf_12zzuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2,3,4,5],zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 8, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -169,17 +169,17 @@ define <8 x i16> @shuf_12zzuuuu(<8 x i16
 
 define <8 x i16> @shuf_012zuuuu(<8 x i16> %a0) {
 ; AMD10H-LABEL: shuf_012zuuuu:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_012zuuuu:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_012zuuuu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BTVER2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
 ; BTVER2-NEXT:    retq
@@ -189,7 +189,7 @@ define <8 x i16> @shuf_012zuuuu(<8 x i16
 
 define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
 ; AMD10H-LABEL: shuf_0zzz1zzz:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    movdqa %xmm0, %xmm1
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; AMD10H-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
@@ -197,12 +197,12 @@ define <8 x i16> @shuf_0zzz1zzz(<8 x i16
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_0zzz1zzz:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero,zero,zero,zero,zero
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_0zzz1zzz:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; BTVER2-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 8, i32 8, i32 1, i32 8, i32 8, i32 8>
@@ -211,19 +211,19 @@ define <8 x i16> @shuf_0zzz1zzz(<8 x i16
 
 define <4 x i32> @shuf_0z1z(<4 x i32> %a0) {
 ; AMD10H-LABEL: shuf_0z1z:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    xorps %xmm1, %xmm1
 ; AMD10H-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuf_0z1z:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    xorps %xmm1, %xmm1
 ; BTVER1-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuf_0z1z:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BTVER2-NEXT:    retq
   %s = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 1, i32 4>
@@ -237,17 +237,17 @@ define <4 x i32> @shuf_0z1z(<4 x i32> %a
 ; A length of zero is equivalent to a bit length of 64.
 define <2 x i64> @insertqi_len0_idx0(<2 x i64> %a, <2 x i64> %b) {
 ; AMD10H-LABEL: insertqi_len0_idx0:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    movaps %xmm1, %xmm0
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: insertqi_len0_idx0:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    movaps %xmm1, %xmm0
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: insertqi_len0_idx0:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovaps %xmm1, %xmm0
 ; BTVER2-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 0, i8 0)
@@ -256,7 +256,7 @@ define <2 x i64> @insertqi_len0_idx0(<2
 
 define <2 x i64> @insertqi_len8_idx16(<2 x i64> %a, <2 x i64> %b) {
 ; ALL-LABEL: insertqi_len8_idx16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 8, i8 16)
@@ -266,7 +266,7 @@ define <2 x i64> @insertqi_len8_idx16(<2
 ; If the length + index exceeds the bottom 64 bits the result is undefined.
 define <2 x i64> @insertqi_len32_idx48(<2 x i64> %a, <2 x i64> %b) {
 ; ALL-LABEL: insertqi_len32_idx48:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a, <2 x i64> %b, i8 32, i8 48)
@@ -275,7 +275,7 @@ define <2 x i64> @insertqi_len32_idx48(<
 
 define <16 x i8> @shuf_0_0_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
 ; ALL-LABEL: shuf_0_0_2_3_uuuu_uuuu_uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 0, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -284,7 +284,7 @@ define <16 x i8> @shuf_0_0_2_3_uuuu_uuuu
 
 define <16 x i8> @shuf_0_16_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
 ; ALL-LABEL: shuf_0_16_2_3_uuuu_uuuu_uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3,4,5,6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -293,7 +293,7 @@ define <16 x i8> @shuf_0_16_2_3_uuuu_uuu
 
 define <16 x i8> @shuf_16_1_2_3_uuuu_uuuu_uuuu(<16 x i8> %a0, <16 x i8> %a1) {
 ; ALL-LABEL: shuf_16_1_2_3_uuuu_uuuu_uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -302,7 +302,7 @@ define <16 x i8> @shuf_16_1_2_3_uuuu_uuu
 
 define <8 x i16> @shuf_0823uuuu(<8 x i16> %a0, <8 x i16> %a1) {
 ; ALL-LABEL: shuf_0823uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1],xmm0[4,5,6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -311,7 +311,7 @@ define <8 x i16> @shuf_0823uuuu(<8 x i16
 
 define <8 x i16> @shuf_0183uuuu(<8 x i16> %a0, <8 x i16> %a1) {
 ; ALL-LABEL: shuf_0183uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[0,1],xmm0[6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 1, i32 8, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -320,7 +320,7 @@ define <8 x i16> @shuf_0183uuuu(<8 x i16
 
 define <8 x i16> @shuf_0128uuuu(<8 x i16> %a0, <8 x i16> %a1) {
 ; ALL-LABEL: shuf_0128uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[0,1],xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -329,7 +329,7 @@ define <8 x i16> @shuf_0128uuuu(<8 x i16
 
 define <8 x i16> @shuf_0893uuuu(<8 x i16> %a0, <8 x i16> %a1) {
 ; ALL-LABEL: shuf_0893uuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1,2,3],xmm0[6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 9, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -338,7 +338,7 @@ define <8 x i16> @shuf_0893uuuu(<8 x i16
 
 define <8 x i16> @shuf_089Auuuu(<8 x i16> %a0, <8 x i16> %a1) {
 ; ALL-LABEL: shuf_089Auuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1,2,3,4,5],xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -347,7 +347,7 @@ define <8 x i16> @shuf_089Auuuu(<8 x i16
 
 define <8 x i16> @shuf_089uuuuu(<8 x i16> %a0, <8 x i16> %a1) {
 ; ALL-LABEL: shuf_089uuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    insertq {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1,2,3],xmm0[6,7,u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %s = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -361,7 +361,7 @@ define <8 x i16> @shuf_089uuuuu(<8 x i16
 ; Out of range.
 define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) {
 ; AMD10H-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AMD10H-NEXT:    andpd {{.*}}(%rip), %xmm0
 ; AMD10H-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -370,14 +370,14 @@ define <16 x i8> @shuffle_8_18_uuuuuuuuu
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    psrld $16, %xmm1
 ; BTVER1-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; BTVER1-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuffle_8_18_uuuuuuuuuuuuuu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpsrld $16, %xmm1, %xmm1
 ; BTVER2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; BTVER2-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -388,19 +388,19 @@ define <16 x i8> @shuffle_8_18_uuuuuuuuu
 
 define <16 x i8> @shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
 ; AMD10H-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AMD10H-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AMD10H-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,0,5,5,4,4,5,5,4,4,5,5,6,6,7,7]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuffle_uu_0_5_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,0,5,5,4,4,5,5,4,4,5,5,6,6,7,7]
 ; BTVER2-NEXT:    retq
   %1 = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 0, i32 5, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -409,18 +409,18 @@ define <16 x i8> @shuffle_uu_0_5_uu_uu_u
 
 define <16 x i8> @shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
 ; AMD10H-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; AMD10H:       # BB#0:
+; AMD10H:       # %bb.0:
 ; AMD10H-NEXT:    psrlq $16, %xmm0
 ; AMD10H-NEXT:    pand {{.*}}(%rip), %xmm0
 ; AMD10H-NEXT:    retq
 ;
 ; BTVER1-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER1:       # BB#0:
+; BTVER1:       # %bb.0:
 ; BTVER1-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[u],zero,xmm0[4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    retq
 ;
 ; BTVER2-LABEL: shuffle_uu_16_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u],zero,xmm0[4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
 ; BTVER2-NEXT:    retq
   %1 = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 16, i32 4, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -429,7 +429,7 @@ define <16 x i8> @shuffle_uu_16_4_16_uu_
 
 define <16 x i8> @shuffle_uu_uu_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<16 x i8> %v) {
 ; ALL-LABEL: shuffle_uu_uu_4_16_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2,3,4],zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; ALL-NEXT:    retq
   %1 = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 undef, i32 4, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
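
A note on the immediates driving the check lines above: for extrqi/insertqi the first i8 is a bit length and the second a bit index into the low 64 bits (with length 0 meaning 64, as the comments in the file say), so len=8/idx=16 selects the single byte at offset 2 -- exactly the xmm0[2] in the assembly comment. One more instance of the encoding, written in the same style (the function name is mine and the expected line is what the byte arithmetic predicts for len=16/idx=32, i.e. bytes 4-5; a sketch, not part of the patch):

  declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8)

  define <2 x i64> @extrqi_len16_idx32(<2 x i64> %a) {
  ; ALL-LABEL: extrqi_len16_idx32:
  ; ALL:       # %bb.0:
  ; ALL-NEXT:    extrq {{.*#+}} xmm0 = xmm0[4,5],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
  ; ALL-NEXT:    retq
    %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a, i8 16, i8 32)
    ret <2 x i64> %1
  }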

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 
 define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
 ; AVX512F-LABEL: shuf2i1_1_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf2i1_1_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vptestmq %xmm0, %xmm0, %k1
 ; AVX512VL-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define <2 x i1> @shuf2i1_1_0(<2 x i1> %a
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf2i1_1_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; VL_BW_DQ-NEXT:    vptestmq %xmm0, %xmm0, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %xmm0
@@ -36,14 +36,14 @@ define <2 x i1> @shuf2i1_1_0(<2 x i1> %a
 
 define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
 ; AVX512F-LABEL: shuf2i1_1_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movl $1, %eax
 ; AVX512F-NEXT:    vmovq %rax, %xmm1
 ; AVX512F-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf2i1_1_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vptestmq %xmm0, %xmm0, %k1
 ; AVX512VL-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -58,7 +58,7 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf2i1_1_2:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; VL_BW_DQ-NEXT:    vptestmq %xmm0, %xmm0, %k0
 ; VL_BW_DQ-NEXT:    movb $1, %al
@@ -76,12 +76,12 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a
 
 define <4 x i1> @shuf4i1_3_2_10(<4 x i1> %a) {
 ; AVX512F-LABEL: shuf4i1_3_2_10:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf4i1_3_2_10:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vptestmd %xmm0, %xmm0, %k1
 ; AVX512VL-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -93,7 +93,7 @@ define <4 x i1> @shuf4i1_3_2_10(<4 x i1>
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf4i1_3_2_10:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpslld $31, %xmm0, %xmm0
 ; VL_BW_DQ-NEXT:    vptestmd %xmm0, %xmm0, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2d %k0, %xmm0
@@ -107,7 +107,7 @@ define <4 x i1> @shuf4i1_3_2_10(<4 x i1>
 
 define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %a1, <8 x i64> %b1) {
 ; AVX512F-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpeqq %zmm2, %zmm0, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
@@ -120,7 +120,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpcmpeqq %zmm2, %zmm0, %k1
 ; AVX512VL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
@@ -134,7 +134,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpcmpeqq %zmm2, %zmm0, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
@@ -151,7 +151,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0
 
 define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <16 x i32> %b, <16 x i32> %a1, <16 x i32> %b1) {
 ; AVX512F-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512F-NEXT:    vpcmpeqd %zmm3, %zmm1, %k2
 ; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
@@ -166,7 +166,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512VL-NEXT:    vpcmpeqd %zmm3, %zmm1, %k2
 ; AVX512VL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
@@ -181,7 +181,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpcmpeqd %zmm2, %zmm0, %k0
 ; VL_BW_DQ-NEXT:    vpcmpeqd %zmm3, %zmm1, %k1
 ; VL_BW_DQ-NEXT:    vpmovm2d %k1, %zmm0
@@ -200,7 +200,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7
 
 define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<32 x i1> %a) {
 ; AVX512F-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
 ; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
@@ -209,7 +209,7 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
 ; AVX512VL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
@@ -218,7 +218,7 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; VL_BW_DQ-NEXT:    vpmovb2m %ymm0, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2w %k0, %zmm0
@@ -233,7 +233,7 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7
 
 define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
 ; AVX512F-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -246,7 +246,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -260,7 +260,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -276,7 +276,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u
 
 define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
 ; AVX512F-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -290,7 +290,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -304,7 +304,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -323,7 +323,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %
 
 define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
 ; AVX512F-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
@@ -335,7 +335,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
@@ -347,7 +347,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
@@ -364,7 +364,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a
 
 define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
 ; AVX512F-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -378,7 +378,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -392,7 +392,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -411,7 +411,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a
 
 define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
 ; AVX512F-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
@@ -425,7 +425,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
@@ -439,7 +439,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
@@ -458,7 +458,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %
 
 define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
 ; AVX512F-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    movb $51, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
@@ -474,7 +474,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    movb $51, %al
 ; AVX512VL-NEXT:    kmovw %eax, %k2
@@ -490,7 +490,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [9,6,1,0,3,7,7,1]
@@ -509,7 +509,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8
 
 define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
 ; AVX512F-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512F-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmq %zmm0, %zmm0, %k1
@@ -525,7 +525,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512VL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512VL-NEXT:    vptestmq %zmm0, %zmm0, %k1
@@ -541,7 +541,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; VL_BW_DQ-NEXT:    vpmovw2m %xmm0, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2q %k0, %zmm0
@@ -561,7 +561,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_
 
 define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
 ; AVX512F-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT:    vpbroadcastd %xmm0, %zmm0
@@ -573,7 +573,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    kmovw %edi, %k1
 ; AVX512VL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512VL-NEXT:    vpbroadcastd %xmm0, %zmm0
@@ -585,7 +585,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovd %edi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2d %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vpbroadcastd %xmm0, %zmm0
@@ -602,7 +602,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0
 
 define i64 @shuf64i1_zero(i64 %a) {
 ; AVX512F-LABEL: shuf64i1_zero:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512F-NEXT:    .cfi_offset %rbp, -16
@@ -634,7 +634,7 @@ define i64 @shuf64i1_zero(i64 %a) {
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuf64i1_zero:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    pushq %rbp
 ; AVX512VL-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512VL-NEXT:    .cfi_offset %rbp, -16
@@ -666,7 +666,7 @@ define i64 @shuf64i1_zero(i64 %a) {
 ; AVX512VL-NEXT:    retq
 ;
 ; VL_BW_DQ-LABEL: shuf64i1_zero:
-; VL_BW_DQ:       # BB#0:
+; VL_BW_DQ:       # %bb.0:
 ; VL_BW_DQ-NEXT:    kmovq %rdi, %k0
 ; VL_BW_DQ-NEXT:    vpmovm2b %k0, %zmm0
 ; VL_BW_DQ-NEXT:    vpbroadcastb %xmm0, %zmm0
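
The v1 tests all follow the pattern visible in the AVX512VL and VL_BW_DQ branches: an <N x i1> value arriving in a vector register is first pushed into a mask register (shift the payload into the sign bit with vpsllq/vpslld/vpsllw, then vptestm* or vpmovb2m), the shuffle itself runs in a full-width integer domain, and the result is narrowed back to a mask. The IR driving these is presumably just a shufflevector on the i1 vector; a sketch matching shuf2i1_1_0, with the body reconstructed from the function name since the diff shows only check lines:

  define <2 x i1> @shuf2i1_1_0(<2 x i1> %a) {
    ; swapping the two mask lanes costs a k-register round-trip on AVX512VL
    %b = shufflevector <2 x i1> %a, <2 x i1> undef, <2 x i32> <i32 1, i32 0>
    ret <2 x i1> %b
  }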

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-v48.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-v48.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-v48.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-v48.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-pc-linux  -mattr=+avx2 < %s | FileCheck %s
 define <32 x i8> @foo(<48 x i8>* %x0, <16 x i32> %x1, <16 x i32> %x2) {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqu 32(%rdi), %xmm0
 ; CHECK-NEXT:    vmovdqu (%rdi), %ymm1
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@
 
 define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
 ; SSE-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andl $1, %esi
 ; SSE-NEXT:    andl $1, %edi
 ; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -20,7 +20,7 @@ define <2 x double> @var_shuffle_v2f64_v
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    andl $1, %esi
 ; AVX-NEXT:    andl $1, %edi
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -36,7 +36,7 @@ define <2 x double> @var_shuffle_v2f64_v
 
 define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
 ; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SSE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SSE-NEXT:    andl $1, %edi
@@ -48,7 +48,7 @@ define <2 x i64> @var_shuffle_v2i64_v2i6
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; AVX-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; AVX-NEXT:    andl $1, %edi
@@ -67,7 +67,7 @@ define <2 x i64> @var_shuffle_v2i64_v2i6
 
 define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
 ; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSE2-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSE2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -87,7 +87,7 @@ define <4 x float> @var_shuffle_v4f32_v4
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSSE3-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSSE3-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -107,7 +107,7 @@ define <4 x float> @var_shuffle_v4f32_v4
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSE41-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSE41-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -124,7 +124,7 @@ define <4 x float> @var_shuffle_v4f32_v4
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; AVX-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; AVX-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -152,7 +152,7 @@ define <4 x float> @var_shuffle_v4f32_v4
 
 define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
 ; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSE2-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSE2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -172,7 +172,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSSE3-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSSE3-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -192,7 +192,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i3
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSE41-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSE41-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -209,7 +209,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i3
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; AVX-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; AVX-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
@@ -237,7 +237,7 @@ define <4 x i32> @var_shuffle_v4i32_v4i3
 
 define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
 ; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSE2-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSE2-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -281,7 +281,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSSE3-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSSE3-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -325,7 +325,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSE41-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSE41-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -355,7 +355,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; AVX-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; AVX-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -404,7 +404,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 
 define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
 ; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSE2-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSE2-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -489,7 +489,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSSE3-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSSE3-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -574,7 +574,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSE41-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSE41-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -629,7 +629,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; AVX-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; AVX-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -723,7 +723,7 @@ define <16 x i8> @var_shuffle_v16i8_v16i
 
 define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwind {
 ; SSE2-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movl (%rdi), %eax
 ; SSE2-NEXT:    movl 4(%rdi), %ecx
 ; SSE2-NEXT:    andl $3, %eax
@@ -743,7 +743,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movl (%rdi), %eax
 ; SSSE3-NEXT:    movl 4(%rdi), %ecx
 ; SSSE3-NEXT:    andl $3, %eax
@@ -763,7 +763,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movl (%rdi), %eax
 ; SSE41-NEXT:    movl 4(%rdi), %ecx
 ; SSE41-NEXT:    andl $3, %eax
@@ -780,7 +780,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl (%rdi), %eax
 ; AVX-NEXT:    movl 4(%rdi), %ecx
 ; AVX-NEXT:    andl $3, %eax
@@ -816,7 +816,7 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 
 define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
 ; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movzbl (%rdi), %eax
 ; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movzbl 15(%rdi), %edx
@@ -901,7 +901,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movzbl (%rdi), %eax
 ; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    movzbl 15(%rdi), %edx
@@ -986,7 +986,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movzbl (%rdi), %eax
 ; SSE41-NEXT:    andl $15, %eax
 ; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -1041,7 +1041,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movzbl (%rdi), %eax
 ; AVX-NEXT:    andl $15, %eax
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
@@ -1167,7 +1167,7 @@ define <16 x i8> @mem_shuffle_v16i8_v16i
 
 define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
 ; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; SSE-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; SSE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
@@ -1184,7 +1184,7 @@ define <4 x float> @var_shuffle_v4f32_v4
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; AVX-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; AVX-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
@@ -1212,7 +1212,7 @@ define <4 x float> @var_shuffle_v4f32_v4
 
 define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
 ; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSE2-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSE2-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -1249,7 +1249,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSSE3-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSSE3-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -1286,7 +1286,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; SSE41-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; SSE41-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -1311,7 +1311,7 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; AVX-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; AVX-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
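
The variable-shuffle lowering shows through in these hunks: because the lane indices are runtime values, the backend spills the source vector to the stack (movaps %xmm0, -N(%rsp)), clamps each index into range with andl ($1, $3, or $15 depending on element count -- legal because an out-of-range extractelement index is undefined), and rebuilds the result with scalar indexed loads. A sketch of the IR shape that produces this, modeled on var_shuffle_v2f64_v2f64_xx_i64 (the body is a reconstruction; only the signature and check lines appear in the diff):

  define <2 x double> @var_shuffle_v2f64_sketch(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
    ; runtime indices: each extractelement becomes a masked load
    ; from the spilled vector, as in the andl/movaps sequences above
    %e0 = extractelement <2 x double> %x, i64 %i0
    %e1 = extractelement <2 x double> %x, i64 %i1
    %r0 = insertelement <2 x double> undef, double %e0, i32 0
    %r1 = insertelement <2 x double> %r0, double %e1, i32 1
    ret <2 x double> %r1
  }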

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
 ; ALL-LABEL: var_shuffle_v4f64_v4f64_xxxx_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rbp
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
@@ -39,7 +39,7 @@ define <4 x double> @var_shuffle_v4f64_v
 
 define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
 ; ALL-LABEL: var_shuffle_v4f64_v4f64_uxx0_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rbp
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
@@ -67,7 +67,7 @@ define <4 x double> @var_shuffle_v4f64_v
 
 define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
 ; ALL-LABEL: var_shuffle_v4f64_v2f64_xxxx_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andl $1, %esi
 ; ALL-NEXT:    andl $1, %edi
 ; ALL-NEXT:    andl $1, %ecx
@@ -92,7 +92,7 @@ define <4 x double> @var_shuffle_v4f64_v
 
 define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
 ; ALL-LABEL: var_shuffle_v4i64_v4i64_xxxx_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rbp
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
@@ -125,7 +125,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i6
 
 define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
 ; ALL-LABEL: var_shuffle_v4i64_v4i64_xx00_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rbp
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
@@ -153,7 +153,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i6
 
 define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
 ; ALL-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andl $1, %edi
 ; ALL-NEXT:    andl $1, %esi
 ; ALL-NEXT:    andl $1, %edx
@@ -180,7 +180,7 @@ define <4 x i64> @var_shuffle_v4i64_v2i6
 
 define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
 ; ALL-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rbp
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
@@ -235,7 +235,7 @@ define <8 x float> @var_shuffle_v8f32_v8
 
 define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
 ; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; ALL-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; ALL-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -284,7 +284,7 @@ define <8 x float> @var_shuffle_v8f32_v4
 
 define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
 ; AVX1-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    pushq %rbp
 ; AVX1-NEXT:    movq %rsp, %rbp
 ; AVX1-NEXT:    andq $-32, %rsp
@@ -346,7 +346,7 @@ define <16 x i16> @var_shuffle_v16i16_v1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    pushq %rbp
 ; AVX2-NEXT:    movq %rsp, %rbp
 ; AVX2-NEXT:    andq $-32, %rsp
@@ -443,7 +443,7 @@ define <16 x i16> @var_shuffle_v16i16_v1
 
 define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
 ; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; AVX1-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; AVX1-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -499,7 +499,7 @@ define <16 x i16> @var_shuffle_v16i16_v8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    # kill: %r9d<def> %r9d<kill> %r9<def>
 ; AVX2-NEXT:    # kill: %r8d<def> %r8d<kill> %r8<def>
 ; AVX2-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
@@ -594,7 +594,7 @@ define <16 x i16> @var_shuffle_v16i16_v8
 
 define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwind {
 ; ALL-LABEL: mem_shuffle_v4i64_v4i64_xxxx_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rbp
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
@@ -639,7 +639,7 @@ define <4 x i64> @mem_shuffle_v4i64_v4i6
 
 define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwind {
 ; ALL-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq (%rdi), %rax
 ; ALL-NEXT:    movq 8(%rdi), %rcx
 ; ALL-NEXT:    andl $1, %eax

Modified: llvm/trunk/test/CodeGen/X86/vector-sqrt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-sqrt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-sqrt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-sqrt.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; Function Attrs: nounwind readonly uwtable
 define <2 x double> @sqrtd2(double* nocapture readonly %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: sqrtd2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsqrtsd (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    vsqrtsd 8(%rdi), %xmm1, %xmm1
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -26,7 +26,7 @@ declare double @sqrt(double) local_unnam
 ; Function Attrs: nounwind readonly uwtable
 define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: sqrtf4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsqrtss (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    vsqrtss 4(%rdi), %xmm1, %xmm1
 ; CHECK-NEXT:    vsqrtss 8(%rdi), %xmm2, %xmm2

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll Mon Dec  4 09:18:51 2017
@@ -12,14 +12,14 @@
 
 define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_add_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddq %xmm3, %xmm1
 ; SSE-NEXT:    paddq %xmm2, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
@@ -29,7 +29,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -38,7 +38,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -51,7 +51,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(
 
 define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_add_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddq %xmm6, %xmm2
 ; SSE-NEXT:    paddq %xmm7, %xmm3
 ; SSE-NEXT:    paddq %xmm4, %xmm0
@@ -70,7 +70,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -91,7 +91,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddq %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -106,7 +106,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -118,7 +118,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(
 
 define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_add_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm2, %xmm0
 ; SSE-NEXT:    paddd %xmm3, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm1
@@ -129,7 +129,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -142,7 +142,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -151,7 +151,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -164,7 +164,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(
 
 define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_add_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddq {{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT:    paddq {{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT:    paddq {{[0-9]+}}(%rsp), %xmm2
@@ -192,7 +192,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddq %xmm4, %xmm0, %xmm8
 ; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -229,7 +229,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddq %ymm5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddq %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddq %ymm7, %ymm3, %ymm3
@@ -257,7 +257,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpaddq %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vpaddq %zmm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
@@ -273,7 +273,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8
 
 define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_add_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm4, %xmm0
 ; SSE-NEXT:    paddd %xmm5, %xmm1
 ; SSE-NEXT:    paddd %xmm6, %xmm2
@@ -289,7 +289,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -310,7 +310,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -326,7 +326,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -338,7 +338,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8
 
 define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_add_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddw %xmm2, %xmm0
 ; SSE-NEXT:    paddw %xmm3, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -348,7 +348,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddw %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -361,7 +361,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -372,7 +372,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_add_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -380,7 +380,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_add_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -388,7 +388,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_add_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -401,7 +401,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8
 
 define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
 ; SSE-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $16, %xmm2
 ; SSE-NEXT:    psrad $16, %xmm2
 ; SSE-NEXT:    pslld $16, %xmm1
@@ -413,7 +413,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -425,7 +425,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -434,7 +434,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512-NEXT:    vpmovdw %zmm1, %ymm1
 ; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
@@ -454,13 +454,13 @@ define <8 x i16> @trunc_add_v8i32_v8i16_
 
 define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_const_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -468,7 +468,7 @@ define <4 x i32> @trunc_add_const_v4i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_const_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -476,7 +476,7 @@ define <4 x i32> @trunc_add_const_v4i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -489,7 +489,7 @@ define <4 x i32> @trunc_add_const_v4i64_
 
 define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -505,7 +505,7 @@ define <8 x i16> @trunc_add_const_v8i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_const_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -521,7 +521,7 @@ define <8 x i16> @trunc_add_const_v8i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_const_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -534,7 +534,7 @@ define <8 x i16> @trunc_add_const_v8i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_const_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -546,7 +546,7 @@ define <8 x i16> @trunc_add_const_v8i64_
 
 define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm0
@@ -556,7 +556,7 @@ define <8 x i16> @trunc_add_const_v8i32_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_const_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -567,7 +567,7 @@ define <8 x i16> @trunc_add_const_v8i32_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_const_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
@@ -575,7 +575,7 @@ define <8 x i16> @trunc_add_const_v8i32_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
@@ -588,7 +588,7 @@ define <8 x i16> @trunc_add_const_v8i32_
 
 define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE-NEXT:    pand %xmm8, %xmm7
 ; SSE-NEXT:    pand %xmm8, %xmm6
@@ -609,7 +609,7 @@ define <16 x i8> @trunc_add_const_v16i64
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_const_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; AVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
@@ -635,7 +635,7 @@ define <16 x i8> @trunc_add_const_v16i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_const_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -660,7 +660,7 @@ define <16 x i8> @trunc_add_const_v16i64
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_const_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -675,7 +675,7 @@ define <16 x i8> @trunc_add_const_v16i64
 
 define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -688,7 +688,7 @@ define <16 x i8> @trunc_add_const_v16i32
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_const_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
@@ -704,7 +704,7 @@ define <16 x i8> @trunc_add_const_v16i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_const_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -719,7 +719,7 @@ define <16 x i8> @trunc_add_const_v16i32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_const_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -731,7 +731,7 @@ define <16 x i8> @trunc_add_const_v16i32
 
 define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -740,7 +740,7 @@ define <16 x i8> @trunc_add_const_v16i16
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -751,7 +751,7 @@ define <16 x i8> @trunc_add_const_v16i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -762,7 +762,7 @@ define <16 x i8> @trunc_add_const_v16i16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -770,7 +770,7 @@ define <16 x i8> @trunc_add_const_v16i16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -778,7 +778,7 @@ define <16 x i8> @trunc_add_const_v16i16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_add_const_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -795,14 +795,14 @@ define <16 x i8> @trunc_add_const_v16i16
 
 define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_sub_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubq %xmm3, %xmm1
 ; SSE-NEXT:    psubq %xmm2, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
@@ -812,7 +812,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -821,7 +821,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -834,7 +834,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(
 
 define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_sub_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubq %xmm6, %xmm2
 ; SSE-NEXT:    psubq %xmm7, %xmm3
 ; SSE-NEXT:    psubq %xmm4, %xmm0
@@ -853,7 +853,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -874,7 +874,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubq %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -889,7 +889,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -901,7 +901,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(
 
 define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_sub_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm2, %xmm0
 ; SSE-NEXT:    psubd %xmm3, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm1
@@ -912,7 +912,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -925,7 +925,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -934,7 +934,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -947,7 +947,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(
 
 define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_sub_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubq {{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT:    psubq {{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT:    psubq {{[0-9]+}}(%rsp), %xmm2
@@ -975,7 +975,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubq %xmm4, %xmm0, %xmm8
 ; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -1012,7 +1012,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubq %ymm5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsubq %ymm7, %ymm3, %ymm3
@@ -1040,7 +1040,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubq %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vpsubq %zmm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1056,7 +1056,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8
 
 define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_sub_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm4, %xmm0
 ; SSE-NEXT:    psubd %xmm5, %xmm1
 ; SSE-NEXT:    psubd %xmm6, %xmm2
@@ -1072,7 +1072,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -1093,7 +1093,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsubd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -1109,7 +1109,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -1121,7 +1121,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8
 
 define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_sub_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubw %xmm2, %xmm0
 ; SSE-NEXT:    psubw %xmm3, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -1131,7 +1131,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -1144,7 +1144,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1155,7 +1155,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_sub_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1163,7 +1163,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_sub_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1171,7 +1171,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_sub_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1188,7 +1188,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8
 
 define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $1, %eax
 ; SSE-NEXT:    movq %rax, %xmm2
 ; SSE-NEXT:    pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
@@ -1198,7 +1198,7 @@ define <4 x i32> @trunc_sub_const_v4i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_const_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movl $1, %eax
 ; AVX1-NEXT:    vmovq %rax, %xmm1
 ; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
@@ -1210,7 +1210,7 @@ define <4 x i32> @trunc_sub_const_v4i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_const_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -1219,7 +1219,7 @@ define <4 x i32> @trunc_sub_const_v4i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_const_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1232,7 +1232,7 @@ define <4 x i32> @trunc_sub_const_v4i64_
 
 define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $1, %eax
 ; SSE-NEXT:    movq %rax, %xmm4
 ; SSE-NEXT:    pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
@@ -1255,7 +1255,7 @@ define <8 x i16> @trunc_sub_const_v8i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_const_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movl $1, %eax
 ; AVX1-NEXT:    vmovq %rax, %xmm2
 ; AVX1-NEXT:    vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
@@ -1277,7 +1277,7 @@ define <8 x i16> @trunc_sub_const_v8i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_const_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubq {{.*}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -1292,7 +1292,7 @@ define <8 x i16> @trunc_sub_const_v8i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_const_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubq {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -1304,7 +1304,7 @@ define <8 x i16> @trunc_sub_const_v8i64_
 
 define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psubd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    pslld $16, %xmm1
@@ -1315,7 +1315,7 @@ define <8 x i16> @trunc_sub_const_v8i32_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_const_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm0
@@ -1327,7 +1327,7 @@ define <8 x i16> @trunc_sub_const_v8i32_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_const_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -1336,7 +1336,7 @@ define <8 x i16> @trunc_sub_const_v8i32_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_const_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1349,7 +1349,7 @@ define <8 x i16> @trunc_sub_const_v8i32_
 
 define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $1, %eax
 ; SSE-NEXT:    movq %rax, %xmm8
 ; SSE-NEXT:    pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
@@ -1380,7 +1380,7 @@ define <16 x i8> @trunc_sub_const_v16i64
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_const_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movl $1, %eax
 ; AVX1-NEXT:    vmovq %rax, %xmm4
 ; AVX1-NEXT:    vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
@@ -1416,7 +1416,7 @@ define <16 x i8> @trunc_sub_const_v16i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_const_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubq {{.*}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpsubq {{.*}}(%rip), %ymm3, %ymm3
@@ -1444,7 +1444,7 @@ define <16 x i8> @trunc_sub_const_v16i64
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_const_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubq {{.*}}(%rip), %zmm1, %zmm1
 ; AVX512-NEXT:    vpsubq {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1460,7 +1460,7 @@ define <16 x i8> @trunc_sub_const_v16i64
 
 define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psubd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    psubd {{.*}}(%rip), %xmm2
@@ -1476,7 +1476,7 @@ define <16 x i8> @trunc_sub_const_v16i32
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_const_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm0
@@ -1495,7 +1495,7 @@ define <16 x i8> @trunc_sub_const_v16i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_const_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpsubd {{.*}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -1511,7 +1511,7 @@ define <16 x i8> @trunc_sub_const_v16i32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_sub_const_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsubd {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -1523,7 +1523,7 @@ define <16 x i8> @trunc_sub_const_v16i32
 
 define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psubw {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -1533,7 +1533,7 @@ define <16 x i8> @trunc_sub_const_v16i16
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -1545,7 +1545,7 @@ define <16 x i8> @trunc_sub_const_v16i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1556,7 +1556,7 @@ define <16 x i8> @trunc_sub_const_v16i16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1564,7 +1564,7 @@ define <16 x i8> @trunc_sub_const_v16i16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1572,7 +1572,7 @@ define <16 x i8> @trunc_sub_const_v16i16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_sub_const_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1589,7 +1589,7 @@ define <16 x i8> @trunc_sub_const_v16i16
 
 define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_mul_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm1, %xmm4
 ; SSE-NEXT:    psrlq $32, %xmm4
 ; SSE-NEXT:    pmuludq %xmm3, %xmm4
@@ -1614,7 +1614,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -1624,7 +1624,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -1634,7 +1634,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpmovqd %zmm1, %ymm1
@@ -1644,7 +1644,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovqd %zmm1, %ymm1
@@ -1654,7 +1654,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512DQ-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
@@ -1669,7 +1669,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(
 
 define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_mul_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
@@ -1696,7 +1696,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
@@ -1721,7 +1721,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -1742,7 +1742,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_mul_v8i64_v8i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovqw %zmm1, %xmm1
 ; AVX512F-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512F-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
@@ -1750,7 +1750,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_mul_v8i64_v8i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpmovqw %zmm1, %xmm1
 ; AVX512BW-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512BW-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
@@ -1758,7 +1758,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_mul_v8i64_v8i16:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vzeroupper
@@ -1770,7 +1770,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(
 
 define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_mul_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
 ; SSE-NEXT:    pmuludq %xmm2, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1793,7 +1793,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -1806,7 +1806,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -1815,7 +1815,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1828,7 +1828,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(
 
 define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_mul_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm8
 ; SSE-NEXT:    movdqa %xmm0, %xmm9
 ; SSE-NEXT:    psrlq $32, %xmm9
@@ -1936,7 +1936,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm8
 ; AVX1-NEXT:    vpmuludq %xmm4, %xmm8, %xmm8
 ; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm9
@@ -2029,7 +2029,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -2065,7 +2065,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_mul_v16i64_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovqd %zmm3, %ymm3
 ; AVX512F-NEXT:    vpmovqd %zmm1, %ymm1
 ; AVX512F-NEXT:    vpmulld %ymm3, %ymm1, %ymm1
@@ -2078,7 +2078,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_mul_v16i64_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpmovqd %zmm3, %ymm3
 ; AVX512BW-NEXT:    vpmovqd %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpmulld %ymm3, %ymm1, %ymm1
@@ -2091,7 +2091,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_mul_v16i64_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmullq %zmm3, %zmm1, %zmm1
 ; AVX512DQ-NEXT:    vpmullq %zmm2, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
@@ -2107,7 +2107,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8
 
 define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_mul_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[1,1,3,3]
 ; SSE-NEXT:    pmuludq %xmm4, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -2147,7 +2147,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -2168,7 +2168,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmulld %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmulld %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -2184,7 +2184,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -2196,7 +2196,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8
 
 define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_mul_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw %xmm2, %xmm0
 ; SSE-NEXT:    pmullw %xmm3, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -2206,7 +2206,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -2219,7 +2219,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2230,7 +2230,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_mul_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -2238,7 +2238,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_mul_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2246,7 +2246,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_mul_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -2259,7 +2259,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8
 
 define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
 ; SSE-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm3, %xmm3
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; SSE-NEXT:    pslld $16, %xmm2
@@ -2271,7 +2271,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -2283,7 +2283,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -2292,7 +2292,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512-NEXT:    vpmovdw %zmm1, %ymm1
 ; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -2312,7 +2312,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_
 
 define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2,3]
 ; SSE-NEXT:    movdqa %xmm1, %xmm3
 ; SSE-NEXT:    pmuludq %xmm2, %xmm3
@@ -2333,7 +2333,7 @@ define <4 x i32> @trunc_mul_const_v4i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_const_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2341,7 +2341,7 @@ define <4 x i32> @trunc_mul_const_v4i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_const_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2349,7 +2349,7 @@ define <4 x i32> @trunc_mul_const_v4i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2362,7 +2362,7 @@ define <4 x i32> @trunc_mul_const_v4i64_
 
 define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -2378,7 +2378,7 @@ define <8 x i16> @trunc_mul_const_v8i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_const_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -2394,7 +2394,7 @@ define <8 x i16> @trunc_mul_const_v8i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_const_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -2407,7 +2407,7 @@ define <8 x i16> @trunc_mul_const_v8i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_const_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -2419,7 +2419,7 @@ define <8 x i16> @trunc_mul_const_v8i64_
 
 define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm0
@@ -2429,7 +2429,7 @@ define <8 x i16> @trunc_mul_const_v8i32_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_const_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -2440,7 +2440,7 @@ define <8 x i16> @trunc_mul_const_v8i32_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_const_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2448,7 +2448,7 @@ define <8 x i16> @trunc_mul_const_v8i32_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2461,7 +2461,7 @@ define <8 x i16> @trunc_mul_const_v8i32_
 
 define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $1, %eax
 ; SSE-NEXT:    movq %rax, %xmm8
 ; SSE-NEXT:    pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
@@ -2539,7 +2539,7 @@ define <16 x i8> @trunc_mul_const_v16i64
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_const_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movl $1, %eax
 ; AVX1-NEXT:    vmovq %rax, %xmm4
 ; AVX1-NEXT:    vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
@@ -2614,7 +2614,7 @@ define <16 x i8> @trunc_mul_const_v16i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_const_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm2, %xmm2
@@ -2642,7 +2642,7 @@ define <16 x i8> @trunc_mul_const_v16i64
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_const_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpmulld {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
@@ -2658,7 +2658,7 @@ define <16 x i8> @trunc_mul_const_v16i64
 
 define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [0,1,2,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; SSE-NEXT:    pmuludq %xmm4, %xmm0
@@ -2702,7 +2702,7 @@ define <16 x i8> @trunc_mul_const_v16i32
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_const_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
@@ -2721,7 +2721,7 @@ define <16 x i8> @trunc_mul_const_v16i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_const_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -2737,7 +2737,7 @@ define <16 x i8> @trunc_mul_const_v16i32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_const_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmulld {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -2749,7 +2749,7 @@ define <16 x i8> @trunc_mul_const_v16i32
 
 define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -2759,7 +2759,7 @@ define <16 x i8> @trunc_mul_const_v16i16
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2771,7 +2771,7 @@ define <16 x i8> @trunc_mul_const_v16i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2782,7 +2782,7 @@ define <16 x i8> @trunc_mul_const_v16i16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -2790,7 +2790,7 @@ define <16 x i8> @trunc_mul_const_v16i16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2798,7 +2798,7 @@ define <16 x i8> @trunc_mul_const_v16i16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_mul_const_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -2815,14 +2815,14 @@ define <16 x i8> @trunc_mul_const_v16i16
 
 define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps %xmm3, %xmm1
 ; SSE-NEXT:    andps %xmm2, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -2830,7 +2830,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -2839,7 +2839,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2852,7 +2852,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(
 
 define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand %xmm6, %xmm2
 ; SSE-NEXT:    pand %xmm7, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm0
@@ -2871,7 +2871,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -2888,7 +2888,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpand %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -2903,7 +2903,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -2915,7 +2915,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(
 
 define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
@@ -2926,7 +2926,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -2937,7 +2937,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -2946,7 +2946,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2959,7 +2959,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(
 
 define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT:    pand {{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT:    pand {{[0-9]+}}(%rsp), %xmm2
@@ -2987,7 +2987,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps %ymm4, %ymm0, %ymm0
 ; AVX1-NEXT:    vandps %ymm5, %ymm1, %ymm1
 ; AVX1-NEXT:    vandps %ymm6, %ymm2, %ymm2
@@ -3016,7 +3016,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpand %ymm5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpand %ymm7, %ymm3, %ymm3
@@ -3044,7 +3044,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpandq %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vpandq %zmm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
@@ -3060,7 +3060,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8
 
 define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE-NEXT:    pand %xmm8, %xmm7
 ; SSE-NEXT:    pand %xmm3, %xmm7
@@ -3076,7 +3076,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -3093,7 +3093,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpand %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -3109,7 +3109,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -3121,7 +3121,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8
 
 define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm1, %xmm3
@@ -3131,7 +3131,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3142,7 +3142,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3153,7 +3153,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_and_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -3161,7 +3161,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_and_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3169,7 +3169,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_and_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -3186,13 +3186,13 @@ define <16 x i8> @trunc_and_v16i16_v16i8
 
 define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_const_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -3200,7 +3200,7 @@ define <4 x i32> @trunc_and_const_v4i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_const_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -3208,7 +3208,7 @@ define <4 x i32> @trunc_and_const_v4i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3221,7 +3221,7 @@ define <4 x i32> @trunc_and_const_v4i64_
 
 define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -3237,7 +3237,7 @@ define <8 x i16> @trunc_and_const_v8i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_const_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -3253,7 +3253,7 @@ define <8 x i16> @trunc_and_const_v8i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_const_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -3266,7 +3266,7 @@ define <8 x i16> @trunc_and_const_v8i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_const_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -3278,7 +3278,7 @@ define <8 x i16> @trunc_and_const_v8i64_
 
 define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm0
@@ -3288,7 +3288,7 @@ define <8 x i16> @trunc_and_const_v8i32_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_const_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -3299,7 +3299,7 @@ define <8 x i16> @trunc_and_const_v8i32_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_const_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3307,7 +3307,7 @@ define <8 x i16> @trunc_and_const_v8i32_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3320,7 +3320,7 @@ define <8 x i16> @trunc_and_const_v8i32_
 
 define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE-NEXT:    pand %xmm8, %xmm7
 ; SSE-NEXT:    pand %xmm8, %xmm6
@@ -3341,7 +3341,7 @@ define <16 x i8> @trunc_and_const_v16i64
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_const_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; AVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
@@ -3367,7 +3367,7 @@ define <16 x i8> @trunc_and_const_v16i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_const_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -3392,7 +3392,7 @@ define <16 x i8> @trunc_and_const_v16i64
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_const_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -3407,7 +3407,7 @@ define <16 x i8> @trunc_and_const_v16i64
 
 define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -3420,7 +3420,7 @@ define <16 x i8> @trunc_and_const_v16i32
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_const_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
@@ -3436,7 +3436,7 @@ define <16 x i8> @trunc_and_const_v16i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_const_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -3451,7 +3451,7 @@ define <16 x i8> @trunc_and_const_v16i32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_and_const_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -3463,7 +3463,7 @@ define <16 x i8> @trunc_and_const_v16i32
 
 define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -3472,7 +3472,7 @@ define <16 x i8> @trunc_and_const_v16i16
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -3483,7 +3483,7 @@ define <16 x i8> @trunc_and_const_v16i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -3494,7 +3494,7 @@ define <16 x i8> @trunc_and_const_v16i16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3502,7 +3502,7 @@ define <16 x i8> @trunc_and_const_v16i16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3510,7 +3510,7 @@ define <16 x i8> @trunc_and_const_v16i16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_and_const_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3527,14 +3527,14 @@ define <16 x i8> @trunc_and_const_v16i16
 
 define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_xor_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm3, %xmm1
 ; SSE-NEXT:    xorps %xmm2, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -3542,7 +3542,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -3551,7 +3551,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3564,7 +3564,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(
 
 define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_xor_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm6, %xmm2
 ; SSE-NEXT:    pxor %xmm7, %xmm3
 ; SSE-NEXT:    pxor %xmm4, %xmm0
@@ -3583,7 +3583,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vxorps %ymm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -3600,7 +3600,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -3615,7 +3615,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -3627,7 +3627,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(
 
 define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_xor_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm3, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
@@ -3638,7 +3638,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -3649,7 +3649,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -3658,7 +3658,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3671,7 +3671,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(
 
 define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_xor_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor {{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT:    pxor {{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT:    pxor {{[0-9]+}}(%rsp), %xmm2
@@ -3699,7 +3699,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %ymm4, %ymm0, %ymm0
 ; AVX1-NEXT:    vxorps %ymm5, %ymm1, %ymm1
 ; AVX1-NEXT:    vxorps %ymm6, %ymm2, %ymm2
@@ -3728,7 +3728,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %ymm5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpxor %ymm7, %ymm3, %ymm3
@@ -3756,7 +3756,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxorq %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vpxorq %zmm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
@@ -3772,7 +3772,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8
 
 define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_xor_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm4, %xmm0
 ; SSE-NEXT:    pxor %xmm5, %xmm1
 ; SSE-NEXT:    pxor %xmm6, %xmm2
@@ -3788,7 +3788,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vxorps %ymm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -3805,7 +3805,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpxor %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -3821,7 +3821,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -3833,7 +3833,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8
 
 define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_xor_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm0
 ; SSE-NEXT:    pxor %xmm3, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -3843,7 +3843,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3854,7 +3854,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3865,7 +3865,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_xor_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -3873,7 +3873,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_xor_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3881,7 +3881,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_xor_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -3898,13 +3898,13 @@ define <16 x i8> @trunc_xor_v16i16_v16i8
 
 define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    xorps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_const_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vxorps {{.*}}(%rip), %xmm0, %xmm0
@@ -3912,7 +3912,7 @@ define <4 x i32> @trunc_xor_const_v4i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_const_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vxorps {{.*}}(%rip), %xmm0, %xmm0
@@ -3920,7 +3920,7 @@ define <4 x i32> @trunc_xor_const_v4i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -3933,7 +3933,7 @@ define <4 x i32> @trunc_xor_const_v4i64_
 
 define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -3949,7 +3949,7 @@ define <8 x i16> @trunc_xor_const_v8i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_const_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -3965,7 +3965,7 @@ define <8 x i16> @trunc_xor_const_v8i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_const_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -3978,7 +3978,7 @@ define <8 x i16> @trunc_xor_const_v8i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_const_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -3990,7 +3990,7 @@ define <8 x i16> @trunc_xor_const_v8i64_
 
 define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm0
@@ -4000,7 +4000,7 @@ define <8 x i16> @trunc_xor_const_v8i32_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_const_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -4011,7 +4011,7 @@ define <8 x i16> @trunc_xor_const_v8i32_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_const_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4019,7 +4019,7 @@ define <8 x i16> @trunc_xor_const_v8i32_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4032,7 +4032,7 @@ define <8 x i16> @trunc_xor_const_v8i32_
 
 define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE-NEXT:    pand %xmm8, %xmm7
 ; SSE-NEXT:    pand %xmm8, %xmm6
@@ -4053,7 +4053,7 @@ define <16 x i8> @trunc_xor_const_v16i64
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_const_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; AVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
@@ -4079,7 +4079,7 @@ define <16 x i8> @trunc_xor_const_v16i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_const_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -4104,7 +4104,7 @@ define <16 x i8> @trunc_xor_const_v16i64
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_const_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -4119,7 +4119,7 @@ define <16 x i8> @trunc_xor_const_v16i64
 
 define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -4132,7 +4132,7 @@ define <16 x i8> @trunc_xor_const_v16i32
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_const_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
@@ -4148,7 +4148,7 @@ define <16 x i8> @trunc_xor_const_v16i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_const_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -4163,7 +4163,7 @@ define <16 x i8> @trunc_xor_const_v16i32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_xor_const_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -4175,7 +4175,7 @@ define <16 x i8> @trunc_xor_const_v16i32
 
 define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -4184,7 +4184,7 @@ define <16 x i8> @trunc_xor_const_v16i16
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -4195,7 +4195,7 @@ define <16 x i8> @trunc_xor_const_v16i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -4206,7 +4206,7 @@ define <16 x i8> @trunc_xor_const_v16i16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4214,7 +4214,7 @@ define <16 x i8> @trunc_xor_const_v16i16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4222,7 +4222,7 @@ define <16 x i8> @trunc_xor_const_v16i16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_xor_const_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4239,14 +4239,14 @@ define <16 x i8> @trunc_xor_const_v16i16
 
 define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_or_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    orps %xmm3, %xmm1
 ; SSE-NEXT:    orps %xmm2, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -4254,7 +4254,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -4263,7 +4263,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -4276,7 +4276,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<
 
 define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_or_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm6, %xmm2
 ; SSE-NEXT:    por %xmm7, %xmm3
 ; SSE-NEXT:    por %xmm4, %xmm0
@@ -4295,7 +4295,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vorps %ymm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -4312,7 +4312,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpor %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vpor %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -4327,7 +4327,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -4339,7 +4339,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<
 
 define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_or_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm3, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
@@ -4350,7 +4350,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -4361,7 +4361,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -4370,7 +4370,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -4383,7 +4383,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<
 
 define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
 ; SSE-LABEL: trunc_or_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por {{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT:    por {{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT:    por {{[0-9]+}}(%rsp), %xmm2
@@ -4411,7 +4411,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vorps %ymm4, %ymm0, %ymm0
 ; AVX1-NEXT:    vorps %ymm5, %ymm1, %ymm1
 ; AVX1-NEXT:    vorps %ymm6, %ymm2, %ymm2
@@ -4440,7 +4440,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpor %ymm5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpor %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpor %ymm7, %ymm3, %ymm3
@@ -4468,7 +4468,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vporq %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vporq %zmm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
@@ -4484,7 +4484,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(
 
 define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 ; SSE-LABEL: trunc_or_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm4, %xmm0
 ; SSE-NEXT:    por %xmm5, %xmm1
 ; SSE-NEXT:    por %xmm6, %xmm2
@@ -4500,7 +4500,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    vorps %ymm3, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -4517,7 +4517,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpor %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpor %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
@@ -4533,7 +4533,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -4545,7 +4545,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(
 
 define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_or_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    por %xmm3, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -4555,7 +4555,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4566,7 +4566,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4577,7 +4577,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_or_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -4585,7 +4585,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_or_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -4593,7 +4593,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_or_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
@@ -4610,13 +4610,13 @@ define <16 x i8> @trunc_or_v16i16_v16i8(
 
 define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    orps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_const_v4i64_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vorps {{.*}}(%rip), %xmm0, %xmm0
@@ -4624,7 +4624,7 @@ define <4 x i32> @trunc_or_const_v4i64_v
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_const_v4i64_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vorps {{.*}}(%rip), %xmm0, %xmm0
@@ -4632,7 +4632,7 @@ define <4 x i32> @trunc_or_const_v4i64_v
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4645,7 +4645,7 @@ define <4 x i32> @trunc_or_const_v4i64_v
 
 define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v8i64_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -4661,7 +4661,7 @@ define <8 x i16> @trunc_or_const_v8i64_v
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_const_v8i64_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -4677,7 +4677,7 @@ define <8 x i16> @trunc_or_const_v8i64_v
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_const_v8i64_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -4690,7 +4690,7 @@ define <8 x i16> @trunc_or_const_v8i64_v
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_const_v8i64_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -4702,7 +4702,7 @@ define <8 x i16> @trunc_or_const_v8i64_v
 
 define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v8i32_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm1
 ; SSE-NEXT:    pslld $16, %xmm0
@@ -4712,7 +4712,7 @@ define <8 x i16> @trunc_or_const_v8i32_v
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_const_v8i32_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -4723,7 +4723,7 @@ define <8 x i16> @trunc_or_const_v8i32_v
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_const_v8i32_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4731,7 +4731,7 @@ define <8 x i16> @trunc_or_const_v8i32_v
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4744,7 +4744,7 @@ define <8 x i16> @trunc_or_const_v8i32_v
 
 define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v16i64_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE-NEXT:    pand %xmm8, %xmm7
 ; SSE-NEXT:    pand %xmm8, %xmm6
@@ -4765,7 +4765,7 @@ define <16 x i8> @trunc_or_const_v16i64_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_const_v16i64_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; AVX1-NEXT:    vpand %xmm5, %xmm4, %xmm4
@@ -4791,7 +4791,7 @@ define <16 x i8> @trunc_or_const_v16i64_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_const_v16i64_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@@ -4816,7 +4816,7 @@ define <16 x i8> @trunc_or_const_v16i64_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_const_v16i64_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -4831,7 +4831,7 @@ define <16 x i8> @trunc_or_const_v16i64_
 
 define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v16i32_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -4844,7 +4844,7 @@ define <16 x i8> @trunc_or_const_v16i32_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_const_v16i32_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
@@ -4860,7 +4860,7 @@ define <16 x i8> @trunc_or_const_v16i32_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_const_v16i32_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -4875,7 +4875,7 @@ define <16 x i8> @trunc_or_const_v16i32_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_or_const_v16i32_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
@@ -4887,7 +4887,7 @@ define <16 x i8> @trunc_or_const_v16i32_
 
 define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v16i16_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -4896,7 +4896,7 @@ define <16 x i8> @trunc_or_const_v16i16_
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -4907,7 +4907,7 @@ define <16 x i8> @trunc_or_const_v16i16_
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -4918,7 +4918,7 @@ define <16 x i8> @trunc_or_const_v16i16_
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4926,7 +4926,7 @@ define <16 x i8> @trunc_or_const_v16i16_
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4934,7 +4934,7 @@ define <16 x i8> @trunc_or_const_v16i16_
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: trunc_or_const_v16i16_v16i8:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4951,7 +4951,7 @@ define <16 x i8> @trunc_or_const_v16i16_
 
 define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-LABEL: mul_add_const_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
@@ -4982,7 +4982,7 @@ define <4 x i32> @mul_add_const_v4i64_v4
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: mul_add_const_v4i64_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -4996,7 +4996,7 @@ define <4 x i32> @mul_add_const_v4i64_v4
 
 define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-LABEL: mul_add_self_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; SSE-NEXT:    movdqa %xmm2, %xmm3
 ; SSE-NEXT:    psrad $31, %xmm3
@@ -5037,7 +5037,7 @@ define <4 x i32> @mul_add_self_v4i64_v4i
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: mul_add_self_v4i64_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -5051,7 +5051,7 @@ define <4 x i32> @mul_add_self_v4i64_v4i
 
 define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
@@ -5081,7 +5081,7 @@ define <4 x i32> @mul_add_multiuse_v4i64
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc.ll Mon Dec  4 09:18:51 2017
@@ -11,14 +11,14 @@
 
 define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
 ; SSE-LABEL: trunc8i64_8i32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
 ; SSE-NEXT:    movaps %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i64_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -27,7 +27,7 @@ define <8 x i32> @trunc8i64_8i32(<8 x i6
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i64_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -36,7 +36,7 @@ define <8 x i32> @trunc8i64_8i32(<8 x i6
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc8i64_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -46,7 +46,7 @@ entry:
 
 define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
 ; SSE2-LABEL: trunc8i64_8i32_ashr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
 ; SSE2-NEXT:    psrad $31, %xmm3
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
@@ -61,7 +61,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc8i64_8i32_ashr:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
 ; SSSE3-NEXT:    psrad $31, %xmm3
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
@@ -76,7 +76,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc8i64_8i32_ashr:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
 ; SSE41-NEXT:    psrad $31, %xmm3
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
@@ -89,7 +89,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i64_8i32_ashr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
@@ -104,7 +104,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i64_8i32_ashr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[1,3,2,3,5,7,6,7]
@@ -113,7 +113,7 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc8i64_8i32_ashr:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsraq $32, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    retq
@@ -125,7 +125,7 @@ entry:
 
 define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
 ; SSE-LABEL: trunc8i64_8i32_lshr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    psrlq $32, %xmm3
 ; SSE-NEXT:    psrlq $32, %xmm2
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
@@ -136,7 +136,7 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i64_8i32_lshr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpsrlq $32, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm0
@@ -149,7 +149,7 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i64_8i32_lshr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -160,7 +160,7 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc8i64_8i32_lshr:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrlq $32, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512-NEXT:    retq
@@ -172,7 +172,7 @@ entry:
 
 define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
 ; SSE2-LABEL: trunc8i64_8i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -187,7 +187,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i6
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc8i64_8i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSSE3-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -202,7 +202,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i6
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc8i64_8i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
@@ -214,7 +214,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i6
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i64_8i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
@@ -229,7 +229,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i6
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i64_8i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -242,7 +242,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i6
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc8i64_8i16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -253,7 +253,7 @@ entry:
 
 define void @trunc8i64_8i8(<8 x i64> %a) {
 ; SSE-LABEL: trunc8i64_8i8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -267,7 +267,7 @@ define void @trunc8i64_8i8(<8 x i64> %a)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i64_8i8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
@@ -284,7 +284,7 @@ define void @trunc8i64_8i8(<8 x i64> %a)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i64_8i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -298,7 +298,7 @@ define void @trunc8i64_8i8(<8 x i64> %a)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc8i64_8i8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovqb %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -310,7 +310,7 @@ entry:
 
 define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
 ; SSE2-LABEL: trunc8i32_8i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pslld $16, %xmm1
 ; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    pslld $16, %xmm0
@@ -319,7 +319,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc8i32_8i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm0
@@ -327,7 +327,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc8i32_8i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSE41-NEXT:    pshufb %xmm2, %xmm1
 ; SSE41-NEXT:    pshufb %xmm2, %xmm0
@@ -335,7 +335,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i32_8i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -345,7 +345,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i32_8i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -353,7 +353,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc8i32_8i16:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -361,13 +361,13 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc8i32_8i16:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc8i32_8i16:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -375,7 +375,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i3
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc8i32_8i16:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
@@ -386,14 +386,14 @@ entry:
 
 define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
 ; SSE-LABEL: trunc8i32_8i16_ashr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    psrad $16, %xmm1
 ; SSE-NEXT:    psrad $16, %xmm0
 ; SSE-NEXT:    packssdw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i32_8i16_ashr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpsrad $16, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
@@ -402,7 +402,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i32_8i16_ashr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrad $16, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
@@ -410,7 +410,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc8i32_8i16_ashr:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpsrad $16, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -418,14 +418,14 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc8i32_8i16_ashr:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpsrad $16, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc8i32_8i16_ashr:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpsrad $16, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -433,7 +433,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc8i32_8i16_ashr:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpsrad $16, %ymm0, %ymm0
 ; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -446,7 +446,7 @@ entry:
 
 define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
 ; SSE2-LABEL: trunc8i32_8i16_lshr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    psrld $16, %xmm1
 ; SSE2-NEXT:    pslld $16, %xmm1
@@ -457,7 +457,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc8i32_8i16_lshr:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,10,11,14,15,14,15,255,255]
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm0
@@ -465,14 +465,14 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc8i32_8i16_lshr:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    psrld $16, %xmm1
 ; SSE41-NEXT:    psrld $16, %xmm0
 ; SSE41-NEXT:    packusdw %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i32_8i16_lshr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpsrld $16, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
@@ -481,7 +481,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i32_8i16_lshr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
@@ -489,7 +489,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc8i32_8i16_lshr:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpsrld $16, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -497,14 +497,14 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc8i32_8i16_lshr:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpsrld $16, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc8i32_8i16_lshr:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpsrld $16, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512BW-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -512,7 +512,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc8i32_8i16_lshr:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpsrld $16, %ymm0, %ymm0
 ; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -525,7 +525,7 @@ entry:
 
 define void @trunc8i32_8i8(<8 x i32> %a) {
 ; SSE2-LABEL: trunc8i32_8i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -535,7 +535,7 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc8i32_8i8:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm0
@@ -544,7 +544,7 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc8i32_8i8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
 ; SSE41-NEXT:    pshufb %xmm2, %xmm1
 ; SSE41-NEXT:    pshufb %xmm2, %xmm0
@@ -553,7 +553,7 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc8i32_8i8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -564,7 +564,7 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc8i32_8i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -573,7 +573,7 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc8i32_8i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -582,13 +582,13 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc8i32_8i8:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovdb %ymm0, (%rax)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc8i32_8i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -597,7 +597,7 @@ define void @trunc8i32_8i8(<8 x i32> %a)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc8i32_8i8:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovdb %ymm0, (%rax)
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
@@ -609,7 +609,7 @@ entry:
 
 define void @trunc16i32_16i16(<16 x i32> %a) {
 ; SSE2-LABEL: trunc16i32_16i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pslld $16, %xmm1
 ; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    pslld $16, %xmm0
@@ -625,7 +625,7 @@ define void @trunc16i32_16i16(<16 x i32>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc16i32_16i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pslld $16, %xmm1
 ; SSSE3-NEXT:    psrad $16, %xmm1
 ; SSSE3-NEXT:    pslld $16, %xmm0
@@ -641,7 +641,7 @@ define void @trunc16i32_16i16(<16 x i32>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc16i32_16i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pxor %xmm4, %xmm4
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
@@ -654,7 +654,7 @@ define void @trunc16i32_16i16(<16 x i32>
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i32_16i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
@@ -670,7 +670,7 @@ define void @trunc16i32_16i16(<16 x i32>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i32_16i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -682,7 +682,7 @@ define void @trunc16i32_16i16(<16 x i32>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc16i32_16i16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovdw %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -694,7 +694,7 @@ entry:
 
 define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
 ; SSE-LABEL: trunc16i32_16i16_ashr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    psrad $16, %xmm3
 ; SSE-NEXT:    psrad $16, %xmm2
 ; SSE-NEXT:    packssdw %xmm3, %xmm2
@@ -706,7 +706,7 @@ define void @trunc16i32_16i16_ashr(<16 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i32_16i16_ashr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpsrad $16, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
@@ -721,7 +721,7 @@ define void @trunc16i32_16i16_ashr(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i32_16i16_ashr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrad $16, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrad $16, %ymm0, %ymm0
 ; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
@@ -731,7 +731,7 @@ define void @trunc16i32_16i16_ashr(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc16i32_16i16_ashr:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrld $16, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdw %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
@@ -745,7 +745,7 @@ entry:
 
 define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
 ; SSE2-LABEL: trunc16i32_16i16_lshr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psrld $16, %xmm2
 ; SSE2-NEXT:    psrld $16, %xmm3
 ; SSE2-NEXT:    psrld $16, %xmm0
@@ -765,7 +765,7 @@ define void @trunc16i32_16i16_lshr(<16 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc16i32_16i16_lshr:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    psrld $16, %xmm2
 ; SSSE3-NEXT:    psrld $16, %xmm3
 ; SSSE3-NEXT:    psrld $16, %xmm0
@@ -785,7 +785,7 @@ define void @trunc16i32_16i16_lshr(<16 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc16i32_16i16_lshr:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    psrld $16, %xmm3
 ; SSE41-NEXT:    psrld $16, %xmm2
 ; SSE41-NEXT:    packusdw %xmm3, %xmm2
@@ -797,7 +797,7 @@ define void @trunc16i32_16i16_lshr(<16 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i32_16i16_lshr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpsrld $16, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
@@ -812,7 +812,7 @@ define void @trunc16i32_16i16_lshr(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i32_16i16_lshr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrld $16, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0
@@ -822,7 +822,7 @@ define void @trunc16i32_16i16_lshr(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc16i32_16i16_lshr:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrld $16, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdw %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
@@ -836,7 +836,7 @@ entry:
 
 define void @trunc16i32_16i8(<16 x i32> %a) {
 ; SSE-LABEL: trunc16i32_16i8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -849,7 +849,7 @@ define void @trunc16i32_16i8(<16 x i32>
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i32_16i8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
@@ -865,7 +865,7 @@ define void @trunc16i32_16i8(<16 x i32>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i32_16i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@@ -880,7 +880,7 @@ define void @trunc16i32_16i8(<16 x i32>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc16i32_16i8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovdb %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -892,7 +892,7 @@ entry:
 
 define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
 ; SSE-LABEL: trunc16i32_16i8_ashr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    psrad $24, %xmm1
 ; SSE-NEXT:    psrad $24, %xmm0
 ; SSE-NEXT:    packssdw %xmm1, %xmm0
@@ -904,7 +904,7 @@ define void @trunc16i32_16i8_ashr(<16 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i32_16i8_ashr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpsrad $24, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrad $24, %xmm0, %xmm0
@@ -919,7 +919,7 @@ define void @trunc16i32_16i8_ashr(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i32_16i8_ashr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrad $24, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrad $24, %ymm0, %ymm0
 ; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
@@ -931,7 +931,7 @@ define void @trunc16i32_16i8_ashr(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc16i32_16i8_ashr:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrld $24, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
@@ -945,7 +945,7 @@ entry:
 
 define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
 ; SSE2-LABEL: trunc16i32_16i8_lshr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psrld $24, %xmm1
 ; SSE2-NEXT:    psrld $24, %xmm0
 ; SSE2-NEXT:    packuswb %xmm1, %xmm0
@@ -957,7 +957,7 @@ define void @trunc16i32_16i8_lshr(<16 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc16i32_16i8_lshr:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    psrld $24, %xmm1
 ; SSSE3-NEXT:    psrld $24, %xmm0
 ; SSSE3-NEXT:    packuswb %xmm1, %xmm0
@@ -969,7 +969,7 @@ define void @trunc16i32_16i8_lshr(<16 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc16i32_16i8_lshr:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    psrld $24, %xmm1
 ; SSE41-NEXT:    psrld $24, %xmm0
 ; SSE41-NEXT:    packssdw %xmm1, %xmm0
@@ -981,7 +981,7 @@ define void @trunc16i32_16i8_lshr(<16 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i32_16i8_lshr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpsrld $24, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrld $24, %xmm0, %xmm0
@@ -996,7 +996,7 @@ define void @trunc16i32_16i8_lshr(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i32_16i8_lshr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrld $24, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrld $24, %ymm0, %ymm0
 ; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
@@ -1008,7 +1008,7 @@ define void @trunc16i32_16i8_lshr(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc16i32_16i8_lshr:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrld $24, %zmm0, %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
@@ -1023,7 +1023,7 @@ entry:
 ;PR25684
 define void @trunc16i16_16i8(<16 x i16> %a) {
 ; SSE2-LABEL: trunc16i16_16i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -1032,7 +1032,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc16i16_16i8:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm0
@@ -1041,7 +1041,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc16i16_16i8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSE41-NEXT:    pshufb %xmm2, %xmm1
 ; SSE41-NEXT:    pshufb %xmm2, %xmm0
@@ -1050,7 +1050,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i16_16i8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -1061,7 +1061,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i16_16i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -1072,7 +1072,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc16i16_16i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1080,7 +1080,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc16i16_16i8:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512VL-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1088,7 +1088,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc16i16_16i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1096,7 +1096,7 @@ define void @trunc16i16_16i8(<16 x i16>
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc16i16_16i8:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovwb %ymm0, (%rax)
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
@@ -1108,7 +1108,7 @@ entry:
 
 define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
 ; SSE-LABEL: trunc16i16_16i8_ashr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    psraw $8, %xmm1
 ; SSE-NEXT:    psraw $8, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
@@ -1116,7 +1116,7 @@ define void @trunc16i16_16i8_ashr(<16 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i16_16i8_ashr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpsraw $8, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsraw $8, %xmm0, %xmm0
@@ -1126,7 +1126,7 @@ define void @trunc16i16_16i8_ashr(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i16_16i8_ashr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsraw $8, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -1135,7 +1135,7 @@ define void @trunc16i16_16i8_ashr(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc16i16_16i8_ashr:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpsraw $8, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1144,7 +1144,7 @@ define void @trunc16i16_16i8_ashr(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc16i16_16i8_ashr:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpsraw $8, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1153,7 +1153,7 @@ define void @trunc16i16_16i8_ashr(<16 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc16i16_16i8_ashr:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpsraw $8, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1161,7 +1161,7 @@ define void @trunc16i16_16i8_ashr(<16 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc16i16_16i8_ashr:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX512BWVL-NEXT:    vpmovwb %ymm0, (%rax)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -1175,7 +1175,7 @@ entry:
 
 define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
 ; SSE-LABEL: trunc16i16_16i8_lshr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    psrlw $8, %xmm1
 ; SSE-NEXT:    psrlw $8, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1183,7 +1183,7 @@ define void @trunc16i16_16i8_lshr(<16 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc16i16_16i8_lshr:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrlw $8, %xmm0, %xmm0
@@ -1193,7 +1193,7 @@ define void @trunc16i16_16i8_lshr(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc16i16_16i8_lshr:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
@@ -1202,7 +1202,7 @@ define void @trunc16i16_16i8_lshr(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc16i16_16i8_lshr:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1211,7 +1211,7 @@ define void @trunc16i16_16i8_lshr(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc16i16_16i8_lshr:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
@@ -1220,7 +1220,7 @@ define void @trunc16i16_16i8_lshr(<16 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc16i16_16i8_lshr:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1228,7 +1228,7 @@ define void @trunc16i16_16i8_lshr(<16 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc16i16_16i8_lshr:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpsrlw $8, %ymm0, %ymm0
 ; AVX512BWVL-NEXT:    vpmovwb %ymm0, (%rax)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -1242,7 +1242,7 @@ entry:
 
 define void @trunc32i16_32i8(<32 x i16> %a) {
 ; SSE2-LABEL: trunc32i16_32i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
 ; SSE2-NEXT:    pand %xmm4, %xmm1
 ; SSE2-NEXT:    pand %xmm4, %xmm0
@@ -1255,7 +1255,7 @@ define void @trunc32i16_32i8(<32 x i16>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc32i16_32i8:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSSE3-NEXT:    pshufb %xmm4, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm4, %xmm0
@@ -1268,7 +1268,7 @@ define void @trunc32i16_32i8(<32 x i16>
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc32i16_32i8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSE41-NEXT:    pshufb %xmm4, %xmm1
 ; SSE41-NEXT:    pshufb %xmm4, %xmm0
@@ -1281,7 +1281,7 @@ define void @trunc32i16_32i8(<32 x i16>
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc32i16_32i8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -1297,7 +1297,7 @@ define void @trunc32i16_32i8(<32 x i16>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc32i16_32i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -1313,7 +1313,7 @@ define void @trunc32i16_32i8(<32 x i16>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc32i16_32i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
@@ -1324,7 +1324,7 @@ define void @trunc32i16_32i8(<32 x i16>
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc32i16_32i8:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512VL-NEXT:    vpmovsxwd %ymm1, %zmm1
@@ -1335,13 +1335,13 @@ define void @trunc32i16_32i8(<32 x i16>
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc32i16_32i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmovwb %zmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc32i16_32i8:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovwb %zmm0, (%rax)
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
@@ -1353,14 +1353,14 @@ entry:
 
 define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
 ; SSE-LABEL: trunc2x4i64_8i32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
 ; SSE-NEXT:    movaps %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc2x4i64_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -1369,7 +1369,7 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc2x4i64_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -1378,7 +1378,7 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc2x4i64_8i32:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1387,14 +1387,14 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc2x4i64_8i32:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
 ; AVX512VL-NEXT:    vpmovqd %ymm1, %xmm1
 ; AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc2x4i64_8i32:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1403,7 +1403,7 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc2x4i64_8i32:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vpmovqd %ymm1, %xmm1
 ; AVX512BWVL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1417,7 +1417,7 @@ entry:
 
 define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: trunc2x4i64_8i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1432,7 +1432,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc2x4i64_8i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSSE3-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1447,7 +1447,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc2x4i64_8i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
 ; SSE41-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
@@ -1462,7 +1462,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc2x4i64_8i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
@@ -1475,7 +1475,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc2x4i64_8i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@@ -1488,7 +1488,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc2x4i64_8i16:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1501,7 +1501,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc2x4i64_8i16:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
 ; AVX512VL-NEXT:    vpmovqd %ymm1, %xmm1
 ; AVX512VL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -1515,7 +1515,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc2x4i64_8i16:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512BW-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
@@ -1528,7 +1528,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc2x4i64_8i16:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vpmovqd %ymm1, %xmm1
 ; AVX512BWVL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -1549,17 +1549,17 @@ entry:
 
 define <4 x i32> @trunc2x2i64_4i32(<2 x i64> %a, <2 x i64> %b) {
 ; SSE-LABEL: trunc2x2i64_4i32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: trunc2x2i64_4i32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc2x2i64_4i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX512-NEXT:    retq
 entry:
@@ -1571,37 +1571,37 @@ entry:
 
 define i64 @trunc2i64_i64(<2 x i64> %inval) {
 ; SSE-LABEL: trunc2i64_i64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE-NEXT:    movq %xmm0, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: trunc2i64_i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX-NEXT:    vmovq %xmm0, %rax
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc2i64_i64:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc2i64_i64:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovqd %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX512VL-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc2i64_i64:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX512BW-NEXT:    vmovq %xmm0, %rax
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc2i64_i64:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovqd %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX512BWVL-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; AVX512BWVL-NEXT:    retq
@@ -1613,7 +1613,7 @@ entry:
 
 define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: trunc2x4i32_8i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1624,7 +1624,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc2x4i32_8i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm0
@@ -1632,7 +1632,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc2x4i32_8i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSE41-NEXT:    pshufb %xmm2, %xmm1
 ; SSE41-NEXT:    pshufb %xmm2, %xmm0
@@ -1640,7 +1640,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: trunc2x4i32_8i16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
@@ -1648,7 +1648,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc2x4i32_8i16:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512F-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
@@ -1656,7 +1656,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc2x4i32_8i16:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; AVX512VL-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1667,7 +1667,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc2x4i32_8i16:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512BW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
@@ -1675,7 +1675,7 @@ define <8 x i16> @trunc2x4i32_8i16(<4 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc2x4i32_8i16:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; AVX512BWVL-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -1694,7 +1694,7 @@ entry:
 ; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
 define i64 @trunc4i32_i64(<4 x i32> %inval) {
 ; SSE2-LABEL: trunc4i32_i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1702,43 +1702,43 @@ define i64 @trunc4i32_i64(<4 x i32> %inv
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc4i32_i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT:    movq %xmm0, %rax
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc4i32_i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: trunc4i32_i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX-NEXT:    vmovq %xmm0, %rax
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc4i32_i64:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc4i32_i64:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX512VL-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc4i32_i64:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512BW-NEXT:    vmovq %xmm0, %rax
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc4i32_i64:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX512BWVL-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; AVX512BWVL-NEXT:    retq
@@ -1750,7 +1750,7 @@ entry:
 
 define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: trunc2x8i16_16i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE2-NEXT:    pand %xmm2, %xmm1
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -1758,7 +1758,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc2x8i16_16i8:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm1
 ; SSSE3-NEXT:    pshufb %xmm2, %xmm0
@@ -1766,7 +1766,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc2x8i16_16i8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSE41-NEXT:    pshufb %xmm2, %xmm1
 ; SSE41-NEXT:    pshufb %xmm2, %xmm0
@@ -1774,7 +1774,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: trunc2x8i16_16i8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
@@ -1782,7 +1782,7 @@ define <16 x i8> @trunc2x8i16_16i8(<8 x
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc2x8i16_16i8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
@@ -1798,50 +1798,50 @@ entry:
 ; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
 define i64 @trunc8i16_i64(<8 x i16> %inval) {
 ; SSE2-LABEL: trunc8i16_i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: trunc8i16_i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT:    movq %xmm0, %rax
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc8i16_i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: trunc8i16_i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovq %xmm0, %rax
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc8i16_i64:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc8i16_i64:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc8i16_i64:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovq %xmm0, %rax
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc8i16_i64:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpmovwb %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX512BWVL-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; AVX512BWVL-NEXT:    retq
@@ -1853,32 +1853,32 @@ entry:
 
 define <16 x i8> @trunc16i64_16i8_const() {
 ; SSE-LABEL: trunc16i64_16i8_const:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: trunc16i64_16i8_const:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc16i64_16i8_const:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc16i64_16i8_const:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc16i64_16i8_const:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc16i64_16i8_const:
-; AVX512BWVL:       # BB#0: # %entry
+; AVX512BWVL:       # %bb.0: # %entry
 ; AVX512BWVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BWVL-NEXT:    retq
 
@@ -1890,7 +1890,7 @@ entry:
 
 define void @PR34773(i16* %a0, i8* %a1) {
 ; SSE-LABEL: PR34773:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqu (%rdi), %xmm0
 ; SSE-NEXT:    movdqu 16(%rdi), %xmm1
 ; SSE-NEXT:    movdqu 32(%rdi), %xmm2
@@ -1906,7 +1906,7 @@ define void @PR34773(i16* %a0, i8* %a1)
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: PR34773:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX1-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -1923,7 +1923,7 @@ define void @PR34773(i16* %a0, i8* %a1)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR34773:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX2-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX2-NEXT:    vpsrlw $8, %ymm0, %ymm0
@@ -1938,7 +1938,7 @@ define void @PR34773(i16* %a0, i8* %a1)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: PR34773:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vpsrlw $8, %ymm0, %ymm0
@@ -1953,7 +1953,7 @@ define void @PR34773(i16* %a0, i8* %a1)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: PR34773:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
@@ -1968,7 +1968,7 @@ define void @PR34773(i16* %a0, i8* %a1)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: PR34773:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512BW-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX512BW-NEXT:    vpsrlw $8, %ymm0, %ymm0
@@ -1981,7 +1981,7 @@ define void @PR34773(i16* %a0, i8* %a1)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: PR34773:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlw $8, (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpsrlw $8, 32(%rdi), %ymm1
 ; AVX512BWVL-NEXT:    vpmovwb %ymm0, (%rsi)

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll Mon Dec  4 09:18:51 2017
@@ -16,7 +16,7 @@
 
 define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
 ; SSE2-LABEL: testv2i64:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    psubq %xmm0, %xmm2
@@ -41,7 +41,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv2i64:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSE3-NEXT:    psubq %xmm0, %xmm2
@@ -66,7 +66,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv2i64:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    psubq %xmm0, %xmm2
@@ -87,7 +87,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv2i64:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    psubq %xmm0, %xmm2
@@ -108,7 +108,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -126,7 +126,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv2i64:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -138,7 +138,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv2i64:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; BITALG_NOVLX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -156,7 +156,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv2i64:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; BITALG-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -174,7 +174,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv2i64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X32-SSE-NEXT:    psubq %xmm0, %xmm2
@@ -198,7 +198,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 
 define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
 ; SSE2-LABEL: testv2i64u:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    psubq %xmm0, %xmm2
@@ -223,7 +223,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv2i64u:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSE3-NEXT:    psubq %xmm0, %xmm2
@@ -248,7 +248,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv2i64u:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    psubq %xmm0, %xmm2
@@ -269,7 +269,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv2i64u:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    psubq %xmm0, %xmm2
@@ -290,7 +290,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: testv2i64u:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -308,7 +308,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv2i64u:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -326,7 +326,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv2i64u:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -336,7 +336,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv2i64u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -347,7 +347,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv2i64u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -359,7 +359,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv2i64u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; BITALG_NOVLX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -377,7 +377,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv2i64u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
 ; BITALG-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -395,7 +395,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv2i64u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X32-SSE-NEXT:    psubq %xmm0, %xmm2
@@ -419,7 +419,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 
 define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
 ; SSE2-LABEL: testv4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    psubd %xmm0, %xmm2
@@ -449,7 +449,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv4i32:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSE3-NEXT:    psubd %xmm0, %xmm2
@@ -479,7 +479,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    psubd %xmm0, %xmm2
@@ -505,7 +505,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv4i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    psubd %xmm0, %xmm2
@@ -530,7 +530,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: testv4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -552,7 +552,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -574,7 +574,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv4i32:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX512CDVL-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -596,7 +596,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv4i32:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX512CD-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -618,7 +618,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv4i32:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -630,7 +630,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv4i32:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; BITALG_NOVLX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -652,7 +652,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv4i32:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; BITALG-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -674,7 +674,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv4i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X32-SSE-NEXT:    psubd %xmm0, %xmm2
@@ -703,7 +703,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 
 define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
 ; SSE2-LABEL: testv4i32u:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    psubd %xmm0, %xmm2
@@ -733,7 +733,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv4i32u:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSE3-NEXT:    psubd %xmm0, %xmm2
@@ -763,7 +763,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv4i32u:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    psubd %xmm0, %xmm2
@@ -789,7 +789,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv4i32u:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    psubd %xmm0, %xmm2
@@ -814,7 +814,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: testv4i32u:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -836,7 +836,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv4i32u:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -858,7 +858,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv4i32u:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -868,7 +868,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv4i32u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -879,7 +879,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv4i32u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -891,7 +891,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv4i32u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; BITALG_NOVLX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -913,7 +913,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv4i32u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubd %xmm0, %xmm1, %xmm2
 ; BITALG-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -935,7 +935,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv4i32u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X32-SSE-NEXT:    psubd %xmm0, %xmm2
@@ -964,7 +964,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 
 define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
 ; SSE2-LABEL: testv8i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    psubw %xmm0, %xmm1
 ; SSE2-NEXT:    pand %xmm0, %xmm1
@@ -991,7 +991,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv8i16:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    psubw %xmm0, %xmm1
 ; SSE3-NEXT:    pand %xmm0, %xmm1
@@ -1018,7 +1018,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv8i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    psubw %xmm0, %xmm1
 ; SSSE3-NEXT:    pand %xmm0, %xmm1
@@ -1041,7 +1041,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv8i16:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    psubw %xmm0, %xmm1
 ; SSE41-NEXT:    pand %xmm0, %xmm1
@@ -1064,7 +1064,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1084,7 +1084,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i16:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1097,7 +1097,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv8i16:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1109,7 +1109,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv8i16:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; BITALG-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1119,7 +1119,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv8i16:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    psubw %xmm0, %xmm1
 ; X32-SSE-NEXT:    pand %xmm0, %xmm1
@@ -1146,7 +1146,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 
 define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
 ; SSE2-LABEL: testv8i16u:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    psubw %xmm0, %xmm1
 ; SSE2-NEXT:    pand %xmm0, %xmm1
@@ -1173,7 +1173,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv8i16u:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    psubw %xmm0, %xmm1
 ; SSE3-NEXT:    pand %xmm0, %xmm1
@@ -1200,7 +1200,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv8i16u:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    psubw %xmm0, %xmm1
 ; SSSE3-NEXT:    pand %xmm0, %xmm1
@@ -1223,7 +1223,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv8i16u:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    psubw %xmm0, %xmm1
 ; SSE41-NEXT:    pand %xmm0, %xmm1
@@ -1246,7 +1246,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv8i16u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1266,7 +1266,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i16u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1279,7 +1279,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv8i16u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1291,7 +1291,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv8i16u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubw %xmm0, %xmm1, %xmm1
 ; BITALG-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1301,7 +1301,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv8i16u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    psubw %xmm0, %xmm1
 ; X32-SSE-NEXT:    pand %xmm0, %xmm1
@@ -1328,7 +1328,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 
 define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
 ; SSE2-LABEL: testv16i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    psubb %xmm0, %xmm1
 ; SSE2-NEXT:    pand %xmm0, %xmm1
@@ -1351,7 +1351,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv16i8:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    psubb %xmm0, %xmm1
 ; SSE3-NEXT:    pand %xmm0, %xmm1
@@ -1374,7 +1374,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv16i8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    psubb %xmm0, %xmm1
 ; SSSE3-NEXT:    pand %xmm0, %xmm1
@@ -1393,7 +1393,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv16i8:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    psubb %xmm0, %xmm1
 ; SSE41-NEXT:    pand %xmm0, %xmm1
@@ -1412,7 +1412,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1429,7 +1429,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i8:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1442,7 +1442,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv16i8:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1454,7 +1454,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv16i8:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; BITALG-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1464,7 +1464,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv16i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    psubb %xmm0, %xmm1
 ; X32-SSE-NEXT:    pand %xmm0, %xmm1
@@ -1487,7 +1487,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 
 define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
 ; SSE2-LABEL: testv16i8u:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    psubb %xmm0, %xmm1
 ; SSE2-NEXT:    pand %xmm0, %xmm1
@@ -1510,7 +1510,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: testv16i8u:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSE3-NEXT:    psubb %xmm0, %xmm1
 ; SSE3-NEXT:    pand %xmm0, %xmm1
@@ -1533,7 +1533,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: testv16i8u:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    psubb %xmm0, %xmm1
 ; SSSE3-NEXT:    pand %xmm0, %xmm1
@@ -1552,7 +1552,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: testv16i8u:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    psubb %xmm0, %xmm1
 ; SSE41-NEXT:    pand %xmm0, %xmm1
@@ -1571,7 +1571,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: testv16i8u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1588,7 +1588,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i8u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1601,7 +1601,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv16i8u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1613,7 +1613,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv16i8u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
 ; BITALG-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -1623,7 +1623,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: testv16i8u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pxor %xmm1, %xmm1
 ; X32-SSE-NEXT:    psubb %xmm0, %xmm1
 ; X32-SSE-NEXT:    pand %xmm0, %xmm1
@@ -1646,37 +1646,37 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 
 define <2 x i64> @foldv2i64() nounwind {
 ; SSE-LABEL: foldv2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $8, %eax
 ; SSE-NEXT:    movq %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl $8, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv2i64:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    movl $8, %eax
 ; AVX512VPOPCNTDQ-NEXT:    vmovq %rax, %xmm0
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv2i64:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    movl $8, %eax
 ; BITALG_NOVLX-NEXT:    vmovq %rax, %xmm0
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv2i64:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    movl $8, %eax
 ; BITALG-NEXT:    vmovq %rax, %xmm0
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv2i64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl $8, %eax
 ; X32-SSE-NEXT:    movd %eax, %xmm0
 ; X32-SSE-NEXT:    retl
@@ -1686,37 +1686,37 @@ define <2 x i64> @foldv2i64() nounwind {
 
 define <2 x i64> @foldv2i64u() nounwind {
 ; SSE-LABEL: foldv2i64u:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $8, %eax
 ; SSE-NEXT:    movq %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv2i64u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl $8, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv2i64u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    movl $8, %eax
 ; AVX512VPOPCNTDQ-NEXT:    vmovq %rax, %xmm0
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv2i64u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    movl $8, %eax
 ; BITALG_NOVLX-NEXT:    vmovq %rax, %xmm0
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv2i64u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    movl $8, %eax
 ; BITALG-NEXT:    vmovq %rax, %xmm0
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv2i64u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl $8, %eax
 ; X32-SSE-NEXT:    movd %eax, %xmm0
 ; X32-SSE-NEXT:    retl
@@ -1726,32 +1726,32 @@ define <2 x i64> @foldv2i64u() nounwind
 
 define <4 x i32> @foldv4i32() nounwind {
 ; SSE-LABEL: foldv4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,32,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv4i32:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv4i32:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv4i32:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv4i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,32,0]
 ; X32-SSE-NEXT:    retl
   %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
@@ -1760,32 +1760,32 @@ define <4 x i32> @foldv4i32() nounwind {
 
 define <4 x i32> @foldv4i32u() nounwind {
 ; SSE-LABEL: foldv4i32u:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,32,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv4i32u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv4i32u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv4i32u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv4i32u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv4i32u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,32,0]
 ; X32-SSE-NEXT:    retl
   %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
@@ -1794,32 +1794,32 @@ define <4 x i32> @foldv4i32u() nounwind
 
 define <8 x i16> @foldv8i16() nounwind {
 ; SSE-LABEL: foldv8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv8i16:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv8i16:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv8i16:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv8i16:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; X32-SSE-NEXT:    retl
   %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
@@ -1828,32 +1828,32 @@ define <8 x i16> @foldv8i16() nounwind {
 
 define <8 x i16> @foldv8i16u() nounwind {
 ; SSE-LABEL: foldv8i16u:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv8i16u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv8i16u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv8i16u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv8i16u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv8i16u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
 ; X32-SSE-NEXT:    retl
   %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
@@ -1862,32 +1862,32 @@ define <8 x i16> @foldv8i16u() nounwind
 
 define <16 x i8> @foldv16i8() nounwind {
 ; SSE-LABEL: foldv16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv16i8:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv16i8:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv16i8:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv16i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; X32-SSE-NEXT:    retl
   %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
@@ -1896,32 +1896,32 @@ define <16 x i8> @foldv16i8() nounwind {
 
 define <16 x i8> @foldv16i8u() nounwind {
 ; SSE-LABEL: foldv16i8u:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foldv16i8u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; AVX-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: foldv16i8u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv16i8u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv16i8u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG-NEXT:    retq
 ;
 ; X32-SSE-LABEL: foldv16i8u:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
 ; X32-SSE-NEXT:    retl
   %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll Mon Dec  4 09:18:51 2017
@@ -12,7 +12,7 @@
 
 define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
 ; AVX1-LABEL: testv4i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm3
@@ -42,7 +42,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv4i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -60,7 +60,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv4i64:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -78,7 +78,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv4i64:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX512CD-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -96,7 +96,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv4i64:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -107,7 +107,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv4i64:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; BITALG_NOVLX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -125,7 +125,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv4i64:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; BITALG-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -143,7 +143,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv4i64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -164,7 +164,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 
 define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
 ; AVX1-LABEL: testv4i64u:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm2, %xmm3
@@ -194,7 +194,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv4i64u:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -212,7 +212,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv4i64u:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -222,7 +222,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv4i64u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512CD-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -232,7 +232,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv4i64u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -243,7 +243,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv4i64u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; BITALG_NOVLX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -261,7 +261,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv4i64u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; BITALG-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -279,7 +279,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv4i64u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -300,7 +300,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 
 define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
 ; AVX1-LABEL: testv8i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm3
@@ -338,7 +338,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv8i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -360,7 +360,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv8i32:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX512CDVL-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -382,7 +382,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv8i32:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX512CD-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -404,7 +404,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i32:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -415,7 +415,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv8i32:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; BITALG_NOVLX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -437,7 +437,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv8i32:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; BITALG-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -459,7 +459,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv8i32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -485,7 +485,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 
 define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
 ; AVX1-LABEL: testv8i32u:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm3
@@ -523,7 +523,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv8i32u:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -545,7 +545,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv8i32u:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -555,7 +555,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv8i32u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512CD-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -565,7 +565,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i32u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv8i32u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; BITALG_NOVLX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -598,7 +598,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv8i32u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; BITALG-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -620,7 +620,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv8i32u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubd %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -646,7 +646,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 
 define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
 ; AVX1-LABEL: testv16i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubw %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm2
@@ -680,7 +680,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -700,7 +700,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv16i16:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -720,7 +720,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv16i16:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CD-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -740,7 +740,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i16:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -752,7 +752,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv16i16:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; BITALG_NOVLX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -763,7 +763,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv16i16:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; BITALG-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -773,7 +773,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv16i16:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; X32-AVX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -797,7 +797,7 @@ define <16 x i16> @testv16i16(<16 x i16>
 
 define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
 ; AVX1-LABEL: testv16i16u:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubw %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm2
@@ -831,7 +831,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv16i16u:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -851,7 +851,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv16i16u:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -871,7 +871,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv16i16u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512CD-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -891,7 +891,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i16u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -903,7 +903,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv16i16u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; BITALG_NOVLX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -914,7 +914,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv16i16u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; BITALG-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -924,7 +924,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv16i16u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubw %ymm0, %ymm1, %ymm1
 ; X32-AVX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -948,7 +948,7 @@ define <16 x i16> @testv16i16u(<16 x i16
 
 define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
 ; AVX1-LABEL: testv32i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm3
@@ -976,7 +976,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -993,7 +993,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv32i8:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1010,7 +1010,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv32i8:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CD-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1027,7 +1027,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv32i8:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1044,7 +1044,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv32i8:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; BITALG_NOVLX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1055,7 +1055,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv32i8:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; BITALG-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1065,7 +1065,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv32i8:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; X32-AVX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1086,7 +1086,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 
 define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
 ; AVX1-LABEL: testv32i8u:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm3
@@ -1114,7 +1114,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: testv32i8u:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1131,7 +1131,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX2-NEXT:    retq
 ;
 ; AVX512CDVL-LABEL: testv32i8u:
-; AVX512CDVL:       # BB#0:
+; AVX512CDVL:       # %bb.0:
 ; AVX512CDVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDVL-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CDVL-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1148,7 +1148,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX512CDVL-NEXT:    retq
 ;
 ; AVX512CD-LABEL: testv32i8u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512CD-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1165,7 +1165,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv32i8u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1182,7 +1182,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: testv32i8u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG_NOVLX-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; BITALG_NOVLX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1193,7 +1193,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: testv32i8u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; BITALG-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1203,7 +1203,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: testv32i8u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-AVX-NEXT:    vpsubb %ymm0, %ymm1, %ymm1
 ; X32-AVX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -1224,22 +1224,22 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 
 define <4 x i64> @foldv4i64() nounwind {
 ; AVX-LABEL: foldv4i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv4i64:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv4i64:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv4i64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
 ; X32-AVX-NEXT:    retl
   %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
@@ -1248,22 +1248,22 @@ define <4 x i64> @foldv4i64() nounwind {
 
 define <4 x i64> @foldv4i64u() nounwind {
 ; AVX-LABEL: foldv4i64u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv4i64u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv4i64u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv4i64u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
 ; X32-AVX-NEXT:    retl
   %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
@@ -1272,22 +1272,22 @@ define <4 x i64> @foldv4i64u() nounwind
 
 define <8 x i32> @foldv8i32() nounwind {
 ; AVX-LABEL: foldv8i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv8i32:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv8i32:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv8i32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; X32-AVX-NEXT:    retl
   %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
@@ -1296,22 +1296,22 @@ define <8 x i32> @foldv8i32() nounwind {
 
 define <8 x i32> @foldv8i32u() nounwind {
 ; AVX-LABEL: foldv8i32u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv8i32u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv8i32u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv8i32u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
 ; X32-AVX-NEXT:    retl
   %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
@@ -1320,22 +1320,22 @@ define <8 x i32> @foldv8i32u() nounwind
 
 define <16 x i16> @foldv16i16() nounwind {
 ; AVX-LABEL: foldv16i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv16i16:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv16i16:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv16i16:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; X32-AVX-NEXT:    retl
   %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
@@ -1344,22 +1344,22 @@ define <16 x i16> @foldv16i16() nounwind
 
 define <16 x i16> @foldv16i16u() nounwind {
 ; AVX-LABEL: foldv16i16u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv16i16u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv16i16u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv16i16u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
 ; X32-AVX-NEXT:    retl
   %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
@@ -1368,22 +1368,22 @@ define <16 x i16> @foldv16i16u() nounwin
 
 define <32 x i8> @foldv32i8() nounwind {
 ; AVX-LABEL: foldv32i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv32i8:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv32i8:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv32i8:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; X32-AVX-NEXT:    retl
   %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
@@ -1392,22 +1392,22 @@ define <32 x i8> @foldv32i8() nounwind {
 
 define <32 x i8> @foldv32i8u() nounwind {
 ; AVX-LABEL: foldv32i8u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; AVX-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: foldv32i8u:
-; BITALG_NOVLX:       # BB#0:
+; BITALG_NOVLX:       # %bb.0:
 ; BITALG_NOVLX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; BITALG_NOVLX-NEXT:    retq
 ;
 ; BITALG-LABEL: foldv32i8u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; BITALG-NEXT:    retq
 ;
 ; X32-AVX-LABEL: foldv32i8u:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
 ; X32-AVX-NEXT:    retl
   %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
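
The foldv* functions above feed constant vectors into @llvm.cttz.*, checking that the count folds to a plain vmovaps of an immediate table; the trailing i1 flag selects whether a zero input is defined (i1 0) or undef (i1 -1), which is why each test has a "u" twin. As a minimal standalone sketch of the same intrinsic on a non-constant input — function name hypothetical, not part of this patch:

declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)

define <4 x i64> @cttz_demo(<4 x i64> %x) nounwind {
  ; i1 false: the result is defined (VectorBits) even when %x is zero
  %r = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %x, i1 false)
  ret <4 x i64> %r
}

Compiled with llc for an AVX target, this produces the vpxor/vpsub/vpand bit-isolation sequence (x & -x) seen in the testv* checks above, now annotated with the "# %bb.0:" block comments this commit introduces.
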

Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
 ; AVX512CD-LABEL: testv8i64:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -35,7 +35,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv8i64:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; AVX512CDBW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -53,7 +53,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv8i64:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -71,7 +71,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i64:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -81,7 +81,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv8i64:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; BITALG-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -103,7 +103,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
 
 define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
 ; AVX512CD-LABEL: testv8i64u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -113,7 +113,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv8i64u:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -123,7 +123,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv8i64u:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -141,7 +141,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv8i64u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -151,7 +151,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv8i64u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
 ; BITALG-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -173,7 +173,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
 
 define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
 ; AVX512CD-LABEL: testv16i32:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -209,7 +209,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv16i32:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512CDBW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -231,7 +231,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv16i32:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -253,7 +253,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i32:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -263,7 +263,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv16i32:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; BITALG-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -289,7 +289,7 @@ define <16 x i32> @testv16i32(<16 x i32>
 
 define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
 ; AVX512CD-LABEL: testv16i32u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CD-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -299,7 +299,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv16i32u:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -309,7 +309,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv16i32u:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -331,7 +331,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv16i32u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -341,7 +341,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv16i32u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubd %zmm0, %zmm1, %zmm2
 ; BITALG-NEXT:    vpandq %zmm2, %zmm0, %zmm0
@@ -367,7 +367,7 @@ define <16 x i32> @testv16i32u(<16 x i32
 
 define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
 ; AVX512CD-LABEL: testv32i16:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512CD-NEXT:    vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -399,7 +399,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv32i16:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -419,7 +419,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv32i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -439,7 +439,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv32i16:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VPOPCNTDQ-NEXT:    vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -457,7 +457,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv32i16:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; BITALG-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -471,7 +471,7 @@ define <32 x i16> @testv32i16(<32 x i16>
 
 define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
 ; AVX512CD-LABEL: testv32i16u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512CD-NEXT:    vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -503,7 +503,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv32i16u:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -523,7 +523,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv32i16u:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -543,7 +543,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv32i16u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VPOPCNTDQ-NEXT:    vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -561,7 +561,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv32i16u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubw %zmm0, %zmm1, %zmm1
 ; BITALG-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -575,7 +575,7 @@ define <32 x i16> @testv32i16u(<32 x i16
 
 define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
 ; AVX512CD-LABEL: testv64i8:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512CD-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -601,7 +601,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv64i8:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -618,7 +618,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv64i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -635,7 +635,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv64i8:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VPOPCNTDQ-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -661,7 +661,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv64i8:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; BITALG-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -675,7 +675,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
 
 define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
 ; AVX512CD-LABEL: testv64i8u:
-; AVX512CD:       # BB#0:
+; AVX512CD:       # %bb.0:
 ; AVX512CD-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512CD-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -701,7 +701,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512CDBW-LABEL: testv64i8u:
-; AVX512CDBW:       # BB#0:
+; AVX512CDBW:       # %bb.0:
 ; AVX512CDBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512CDBW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -718,7 +718,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
 ; AVX512CDBW-NEXT:    retq
 ;
 ; AVX512BW-LABEL: testv64i8u:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
@@ -735,7 +735,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512VPOPCNTDQ-LABEL: testv64i8u:
-; AVX512VPOPCNTDQ:       # BB#0:
+; AVX512VPOPCNTDQ:       # %bb.0:
 ; AVX512VPOPCNTDQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VPOPCNTDQ-NEXT:    vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
@@ -761,7 +761,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
 ; AVX512VPOPCNTDQ-NEXT:    retq
 ;
 ; BITALG-LABEL: testv64i8u:
-; BITALG:       # BB#0:
+; BITALG:       # %bb.0:
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsubb %zmm0, %zmm1, %zmm1
 ; BITALG-NEXT:    vpandq %zmm1, %zmm0, %zmm0
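
Each RUN line in these files pins one feature set to one FileCheck prefix, so a single IR function carries parallel expectations for every lowering. A stripped-down sketch of that pattern — triple, attributes, and function name chosen for illustration, not copied from the file:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512cd,+avx512bw | FileCheck %s --check-prefix=AVX512CDBW

define <8 x i64> @cttz_demo(<8 x i64> %x) nounwind {
; AVX512CDBW-LABEL: cttz_demo:
; AVX512CDBW:       # %bb.0:
  %r = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %x, i1 false)
  ret <8 x i64> %r
}
declare <8 x i64> @llvm.cttz.v8i64(<8 x i64>, i1)

After this commit, "# %bb.0:" is the spelling the AsmPrinter emits for the entry block, so checks regenerated with utils/update_llc_test_checks.py match the MIR-style block names.
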

Modified: llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define <2 x i1> @ugt_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE-LABEL: ugt_v2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlq $1, %xmm0
 ; SSE-NEXT:    psrlq $1, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -27,7 +27,7 @@ define <2 x i1> @ugt_v2i64(<2 x i64> %x,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ugt_v2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
@@ -40,7 +40,7 @@ define <2 x i1> @ugt_v2i64(<2 x i64> %x,
 
 define <2 x i1> @ult_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE-LABEL: ult_v2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlq $1, %xmm0
 ; SSE-NEXT:    psrlq $1, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -57,7 +57,7 @@ define <2 x i1> @ult_v2i64(<2 x i64> %x,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ult_v2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -70,7 +70,7 @@ define <2 x i1> @ult_v2i64(<2 x i64> %x,
 
 define <2 x i1> @uge_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE-LABEL: uge_v2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlq $1, %xmm0
 ; SSE-NEXT:    psrlq $1, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -89,7 +89,7 @@ define <2 x i1> @uge_v2i64(<2 x i64> %x,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: uge_v2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -104,7 +104,7 @@ define <2 x i1> @uge_v2i64(<2 x i64> %x,
 
 define <2 x i1> @ule_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE-LABEL: ule_v2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlq $1, %xmm0
 ; SSE-NEXT:    psrlq $1, %xmm1
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
@@ -123,7 +123,7 @@ define <2 x i1> @ule_v2i64(<2 x i64> %x,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ule_v2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
@@ -138,14 +138,14 @@ define <2 x i1> @ule_v2i64(<2 x i64> %x,
 
 define <4 x i1> @ugt_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: ugt_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrld $1, %xmm0
 ; SSE-NEXT:    psrld $1, %xmm1
 ; SSE-NEXT:    pcmpgtd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ugt_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -158,7 +158,7 @@ define <4 x i1> @ugt_v4i32(<4 x i32> %x,
 
 define <4 x i1> @ult_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: ult_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrld $1, %xmm0
 ; SSE-NEXT:    psrld $1, %xmm1
 ; SSE-NEXT:    pcmpgtd %xmm0, %xmm1
@@ -166,7 +166,7 @@ define <4 x i1> @ult_v4i32(<4 x i32> %x,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ult_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
@@ -179,7 +179,7 @@ define <4 x i1> @ult_v4i32(<4 x i32> %x,
 
 define <4 x i1> @uge_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE2-LABEL: uge_v4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psrld $1, %xmm0
 ; SSE2-NEXT:    psrld $1, %xmm1
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
@@ -188,7 +188,7 @@ define <4 x i1> @uge_v4i32(<4 x i32> %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: uge_v4i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psrld $1, %xmm0
 ; SSE41-NEXT:    psrld $1, %xmm1
 ; SSE41-NEXT:    pmaxud %xmm0, %xmm1
@@ -196,7 +196,7 @@ define <4 x i1> @uge_v4i32(<4 x i32> %x,
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: uge_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
@@ -210,7 +210,7 @@ define <4 x i1> @uge_v4i32(<4 x i32> %x,
 
 define <4 x i1> @ule_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE2-LABEL: ule_v4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psrld $1, %xmm0
 ; SSE2-NEXT:    psrld $1, %xmm1
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
@@ -219,7 +219,7 @@ define <4 x i1> @ule_v4i32(<4 x i32> %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: ule_v4i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psrld $1, %xmm0
 ; SSE41-NEXT:    psrld $1, %xmm1
 ; SSE41-NEXT:    pminud %xmm0, %xmm1
@@ -227,7 +227,7 @@ define <4 x i1> @ule_v4i32(<4 x i32> %x,
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: ule_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm1
@@ -241,14 +241,14 @@ define <4 x i1> @ule_v4i32(<4 x i32> %x,
 
 define <8 x i1> @ugt_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: ugt_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlw $1, %xmm0
 ; SSE-NEXT:    psrlw $1, %xmm1
 ; SSE-NEXT:    pcmpgtw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ugt_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
@@ -261,7 +261,7 @@ define <8 x i1> @ugt_v8i16(<8 x i16> %x,
 
 define <8 x i1> @ult_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: ult_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlw $1, %xmm0
 ; SSE-NEXT:    psrlw $1, %xmm1
 ; SSE-NEXT:    pcmpgtw %xmm0, %xmm1
@@ -269,7 +269,7 @@ define <8 x i1> @ult_v8i16(<8 x i16> %x,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ult_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
@@ -282,7 +282,7 @@ define <8 x i1> @ult_v8i16(<8 x i16> %x,
 
 define <8 x i1> @uge_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE2-LABEL: uge_v8i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psrlw $1, %xmm0
 ; SSE2-NEXT:    psrlw $1, %xmm1
 ; SSE2-NEXT:    psubusw %xmm0, %xmm1
@@ -291,7 +291,7 @@ define <8 x i1> @uge_v8i16(<8 x i16> %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: uge_v8i16:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psrlw $1, %xmm0
 ; SSE41-NEXT:    psrlw $1, %xmm1
 ; SSE41-NEXT:    pmaxuw %xmm0, %xmm1
@@ -299,7 +299,7 @@ define <8 x i1> @uge_v8i16(<8 x i16> %x,
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: uge_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
@@ -313,7 +313,7 @@ define <8 x i1> @uge_v8i16(<8 x i16> %x,
 
 define <8 x i1> @ule_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE2-LABEL: ule_v8i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psrlw $1, %xmm0
 ; SSE2-NEXT:    psrlw $1, %xmm1
 ; SSE2-NEXT:    psubusw %xmm1, %xmm0
@@ -322,7 +322,7 @@ define <8 x i1> @ule_v8i16(<8 x i16> %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: ule_v8i16:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psrlw $1, %xmm0
 ; SSE41-NEXT:    psrlw $1, %xmm1
 ; SSE41-NEXT:    pminuw %xmm0, %xmm1
@@ -330,7 +330,7 @@ define <8 x i1> @ule_v8i16(<8 x i16> %x,
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: ule_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm1
@@ -344,7 +344,7 @@ define <8 x i1> @ule_v8i16(<8 x i16> %x,
 
 define <16 x i1> @ugt_v16i8(<16 x i8> %x, <16 x i8> %y) {
 ; SSE-LABEL: ugt_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlw $1, %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -354,7 +354,7 @@ define <16 x i1> @ugt_v16i8(<16 x i8> %x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ugt_v16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -370,7 +370,7 @@ define <16 x i1> @ugt_v16i8(<16 x i8> %x
 
 define <16 x i1> @ult_v16i8(<16 x i8> %x, <16 x i8> %y) {
 ; SSE-LABEL: ult_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlw $1, %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -381,7 +381,7 @@ define <16 x i1> @ult_v16i8(<16 x i8> %x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ult_v16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -397,7 +397,7 @@ define <16 x i1> @ult_v16i8(<16 x i8> %x
 
 define <16 x i1> @uge_v16i8(<16 x i8> %x, <16 x i8> %y) {
 ; SSE-LABEL: uge_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlw $1, %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -408,7 +408,7 @@ define <16 x i1> @uge_v16i8(<16 x i8> %x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: uge_v16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -425,7 +425,7 @@ define <16 x i1> @uge_v16i8(<16 x i8> %x
 
 define <16 x i1> @ule_v16i8(<16 x i8> %x, <16 x i8> %y) {
 ; SSE-LABEL: ule_v16i8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlw $1, %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; SSE-NEXT:    pand %xmm2, %xmm0
@@ -436,7 +436,7 @@ define <16 x i1> @ule_v16i8(<16 x i8> %x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ule_v16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
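
These unsigned-compare tests work because both operands are first logically shifted right by one, clearing the sign bit; with the sign bit known zero, the backend can legally use the signed pcmpgt* instructions (pre-AVX512 x86 has no direct unsigned vector compare). A minimal sketch of the IR shape being exercised — function name hypothetical:

define <4 x i1> @ugt_demo(<4 x i32> %x, <4 x i32> %y) {
  ; clearing the top bit makes unsigned and signed ordering agree
  %sx = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %sy = lshr <4 x i32> %y, <i32 1, i32 1, i32 1, i32 1>
  %c = icmp ugt <4 x i32> %sx, %sy
  ret <4 x i1> %c
}

This matches the psrld $1 / pcmpgtd pairs in the SSE check lines above; the inclusive uge/ule forms additionally need an inverted pcmpgtd or a pmaxud/pminud-plus-pcmpeqd trick, as the SSE2 and SSE41 prefixes show.
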

Modified: llvm/trunk/test/CodeGen/X86/vector-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-zext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-zext.ll Mon Dec  4 09:18:51 2017
@@ -9,24 +9,24 @@
 
 define <8 x i16> @zext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_8i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_8i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_8i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_16i8_to_8i16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX-NEXT:    retq
 entry:
@@ -38,7 +38,7 @@ entry:
 ; PR17654
 define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
 ; SSE2-LABEL: zext_16i8_to_16i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -46,7 +46,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_16i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -54,7 +54,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_16i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -62,7 +62,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_16i8_to_16i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -70,12 +70,12 @@ define <16 x i16> @zext_16i8_to_16i16(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_16i8_to_16i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_16i8_to_16i16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -85,7 +85,7 @@ entry:
 
 define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
 ; SSE2-LABEL: zext_32i8_to_32i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
@@ -97,7 +97,7 @@ define <32 x i16> @zext_32i8_to_32i16(<3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_32i8_to_32i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm3
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm4, %xmm4
@@ -109,7 +109,7 @@ define <32 x i16> @zext_32i8_to_32i16(<3
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_32i8_to_32i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -121,7 +121,7 @@ define <32 x i16> @zext_32i8_to_32i16(<3
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_32i8_to_32i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
@@ -135,7 +135,7 @@ define <32 x i16> @zext_32i8_to_32i16(<3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_32i8_to_32i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -143,7 +143,7 @@ define <32 x i16> @zext_32i8_to_32i16(<3
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: zext_32i8_to_32i16:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -151,7 +151,7 @@ define <32 x i16> @zext_32i8_to_32i16(<3
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: zext_32i8_to_32i16:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512BW-NEXT:    retq
 entry:
@@ -161,26 +161,26 @@ entry:
 
 define <4 x i32> @zext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_4i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_4i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_4i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_16i8_to_4i32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -191,7 +191,7 @@ entry:
 
 define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -201,7 +201,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -211,7 +211,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -219,7 +219,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_16i8_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -227,12 +227,12 @@ define <8 x i32> @zext_16i8_to_8i32(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_16i8_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_16i8_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -243,7 +243,7 @@ entry:
 
 define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_16i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    movdqa %xmm3, %xmm1
@@ -258,7 +258,7 @@ define <16 x i32> @zext_16i8_to_16i32(<1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_16i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm3
 ; SSSE3-NEXT:    pxor %xmm4, %xmm4
 ; SSSE3-NEXT:    movdqa %xmm3, %xmm1
@@ -273,7 +273,7 @@ define <16 x i32> @zext_16i8_to_16i32(<1
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_16i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -285,7 +285,7 @@ define <16 x i32> @zext_16i8_to_16i32(<1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_16i8_to_16i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -299,7 +299,7 @@ define <16 x i32> @zext_16i8_to_16i32(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_16i8_to_16i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -307,7 +307,7 @@ define <16 x i32> @zext_16i8_to_16i32(<1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_16i8_to_16i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -317,7 +317,7 @@ entry:
 
 define <2 x i64> @zext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_2i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -325,17 +325,17 @@ define <2 x i64> @zext_16i8_to_2i64(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_2i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_2i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_16i8_to_2i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -346,7 +346,7 @@ entry:
 
 define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -357,14 +357,14 @@ define <4 x i64> @zext_16i8_to_4i64(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    psrld $16, %xmm0
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -372,7 +372,7 @@ define <4 x i64> @zext_16i8_to_4i64(<16
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_16i8_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -380,12 +380,12 @@ define <4 x i64> @zext_16i8_to_4i64(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_16i8_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_16i8_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -396,7 +396,7 @@ entry:
 
 define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i8_to_8i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
@@ -413,7 +413,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i8_to_8i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
 ; SSSE3-NEXT:    pshufb %xmm4, %xmm0
@@ -426,7 +426,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i8_to_8i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    psrld $16, %xmm1
@@ -439,7 +439,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_16i8_to_8i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm2
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
@@ -453,7 +453,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_16i8_to_8i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
@@ -461,7 +461,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_16i8_to_8i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -472,24 +472,24 @@ entry:
 
 define <4 x i32> @zext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_8i16_to_4i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i16_to_4i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i16_to_4i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_8i16_to_4i32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX-NEXT:    retq
 entry:
@@ -500,7 +500,7 @@ entry:
 
 define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_8i16_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -508,7 +508,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i16_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -516,7 +516,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i16_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -524,7 +524,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_8i16_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -532,12 +532,12 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i16_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i16_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -547,7 +547,7 @@ entry:
 
 define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_16i16_to_16i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
@@ -559,7 +559,7 @@ define <16 x i32> @zext_16i16_to_16i32(<
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_16i16_to_16i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm3
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm4, %xmm4
@@ -571,7 +571,7 @@ define <16 x i32> @zext_16i16_to_16i32(<
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_16i16_to_16i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -583,7 +583,7 @@ define <16 x i32> @zext_16i16_to_16i32(<
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_16i16_to_16i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
@@ -597,7 +597,7 @@ define <16 x i32> @zext_16i16_to_16i32(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_16i16_to_16i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -605,7 +605,7 @@ define <16 x i32> @zext_16i16_to_16i32(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_16i16_to_16i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -615,26 +615,26 @@ entry:
 
 define <2 x i64> @zext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_8i16_to_2i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i16_to_2i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i16_to_2i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_8i16_to_2i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -645,7 +645,7 @@ entry:
 
 define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_8i16_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -655,7 +655,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i16_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -665,7 +665,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i16_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -673,7 +673,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_8i16_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -681,12 +681,12 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i16_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i16_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -697,7 +697,7 @@ entry:
 
 define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_8i16_to_8i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    movdqa %xmm3, %xmm1
@@ -712,7 +712,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i16_to_8i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm3
 ; SSSE3-NEXT:    pxor %xmm4, %xmm4
 ; SSSE3-NEXT:    movdqa %xmm3, %xmm1
@@ -727,7 +727,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i16_to_8i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
@@ -739,7 +739,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_8i16_to_8i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
@@ -753,7 +753,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i16_to_8i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -761,7 +761,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i16_to_8i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -771,24 +771,24 @@ entry:
 
 define <2 x i64> @zext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_4i32_to_2i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_4i32_to_2i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_4i32_to_2i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_4i32_to_2i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; AVX-NEXT:    retq
 entry:
@@ -799,7 +799,7 @@ entry:
 
 define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_4i32_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -807,7 +807,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_4i32_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movaps %xmm0, %xmm1
 ; SSSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -815,7 +815,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_4i32_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
@@ -823,7 +823,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_4i32_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -831,12 +831,12 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_4i32_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_4i32_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -846,7 +846,7 @@ entry:
 
 define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: zext_8i32_to_8i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps %xmm1, %xmm3
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    xorps %xmm4, %xmm4
@@ -858,7 +858,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i32_to_8i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movaps %xmm1, %xmm3
 ; SSSE3-NEXT:    movaps %xmm0, %xmm1
 ; SSSE3-NEXT:    xorps %xmm4, %xmm4
@@ -870,7 +870,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i32_to_8i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -882,7 +882,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_8i32_to_8i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
@@ -896,7 +896,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i32_to_8i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -904,7 +904,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i32_to_8i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -914,7 +914,7 @@ entry:
 
 define <2 x i64> @load_zext_2i8_to_2i64(<2 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_2i8_to_2i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movzwl (%rdi), %eax
 ; SSE2-NEXT:    movd %eax, %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
@@ -924,19 +924,19 @@ define <2 x i64> @load_zext_2i8_to_2i64(
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_2i8_to_2i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movzwl (%rdi), %eax
 ; SSSE3-NEXT:    movd %eax, %xmm0
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_2i8_to_2i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_zext_2i8_to_2i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -947,7 +947,7 @@ entry:
 
 define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_4i8_to_4i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -955,7 +955,7 @@ define <4 x i32> @load_zext_4i8_to_4i32(
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_4i8_to_4i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -963,12 +963,12 @@ define <4 x i32> @load_zext_4i8_to_4i32(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_4i8_to_4i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_zext_4i8_to_4i32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -979,7 +979,7 @@ entry:
 
 define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_4i8_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -990,7 +990,7 @@ define <4 x i64> @load_zext_4i8_to_4i64(
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_4i8_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm0
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -998,25 +998,25 @@ define <4 x i64> @load_zext_4i8_to_4i64(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_4i8_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_4i8_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_4i8_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_4i8_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1027,26 +1027,26 @@ entry:
 
 define <8 x i16> @load_zext_8i8_to_8i16(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_8i8_to_8i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_8i8_to_8i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_8i8_to_8i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_zext_8i8_to_8i16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX-NEXT:    retq
 entry:
@@ -1057,7 +1057,7 @@ entry:
 
 define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_8i8_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1067,7 +1067,7 @@ define <8 x i32> @load_zext_8i8_to_8i32(
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_8i8_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1077,25 +1077,25 @@ define <8 x i32> @load_zext_8i8_to_8i32(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_8i8_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_8i8_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_8i8_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_8i8_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1106,7 +1106,7 @@ entry:
 
 define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_16i8_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1116,7 +1116,7 @@ define <8 x i32> @load_zext_16i8_to_8i32
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_16i8_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa (%rdi), %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1126,7 +1126,7 @@ define <8 x i32> @load_zext_16i8_to_8i32
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_16i8_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa (%rdi), %xmm1
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
@@ -1134,7 +1134,7 @@ define <8 x i32> @load_zext_16i8_to_8i32
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_16i8_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1143,12 +1143,12 @@ define <8 x i32> @load_zext_16i8_to_8i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_16i8_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_16i8_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1160,7 +1160,7 @@ entry:
 
 define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_8i8_to_8i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
@@ -1177,7 +1177,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_8i8_to_8i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm0
@@ -1191,7 +1191,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_8i8_to_8i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -1199,7 +1199,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_8i8_to_8i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1209,13 +1209,13 @@ define <8 x i64> @load_zext_8i8_to_8i64(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_8i8_to_8i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_8i8_to_8i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1226,7 +1226,7 @@ entry:
 
 define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
 ; SSE2-LABEL: load_zext_16i8_to_16i16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    movdqa %xmm1, %xmm0
@@ -1235,7 +1235,7 @@ define <16 x i16> @load_zext_16i8_to_16i
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_16i8_to_16i16:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa (%rdi), %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm0
@@ -1244,25 +1244,25 @@ define <16 x i16> @load_zext_16i8_to_16i
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_16i8_to_16i16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_16i8_to_16i16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_16i8_to_16i16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_16i8_to_16i16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1273,7 +1273,7 @@ entry:
 
 define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
 ; SSE2-LABEL: load_zext_2i16_to_2i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1281,7 +1281,7 @@ define <2 x i64> @load_zext_2i16_to_2i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_2i16_to_2i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1289,12 +1289,12 @@ define <2 x i64> @load_zext_2i16_to_2i64
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_2i16_to_2i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_zext_2i16_to_2i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -1305,26 +1305,26 @@ entry:
 
 define <4 x i32> @load_zext_4i16_to_4i32(<4 x i16> *%ptr) {
 ; SSE2-LABEL: load_zext_4i16_to_4i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_4i16_to_4i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_4i16_to_4i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_zext_4i16_to_4i32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX-NEXT:    retq
 entry:
@@ -1335,7 +1335,7 @@ entry:
 
 define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
 ; SSE2-LABEL: load_zext_4i16_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -1345,7 +1345,7 @@ define <4 x i64> @load_zext_4i16_to_4i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_4i16_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -1355,25 +1355,25 @@ define <4 x i64> @load_zext_4i16_to_4i64
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_4i16_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_4i16_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_4i16_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_4i16_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1384,7 +1384,7 @@ entry:
 
 define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
 ; SSE2-LABEL: load_zext_8i16_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    movdqa %xmm1, %xmm0
@@ -1393,7 +1393,7 @@ define <8 x i32> @load_zext_8i16_to_8i32
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_8i16_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa (%rdi), %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm0
@@ -1402,25 +1402,25 @@ define <8 x i32> @load_zext_8i16_to_8i32
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_8i16_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_8i16_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_8i16_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_8i16_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1431,26 +1431,26 @@ entry:
 
 define <2 x i64> @load_zext_2i32_to_2i64(<2 x i32> *%ptr) {
 ; SSE2-LABEL: load_zext_2i32_to_2i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_2i32_to_2i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_2i32_to_2i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_zext_2i32_to_2i64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; AVX-NEXT:    retq
 entry:
@@ -1461,7 +1461,7 @@ entry:
 
 define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
 ; SSE2-LABEL: load_zext_4i32_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm1
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
@@ -1470,7 +1470,7 @@ define <4 x i64> @load_zext_4i32_to_4i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: load_zext_4i32_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movaps (%rdi), %xmm1
 ; SSSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSSE3-NEXT:    movaps %xmm1, %xmm0
@@ -1479,25 +1479,25 @@ define <4 x i64> @load_zext_4i32_to_4i64
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: load_zext_4i32_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: load_zext_4i32_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_zext_4i32_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_zext_4i32_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1508,7 +1508,7 @@ entry:
 
 define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
 ; SSE2-LABEL: zext_8i8_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
@@ -1518,7 +1518,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_8i8_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
@@ -1528,7 +1528,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_8i8_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1537,7 +1537,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_8i8_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1546,13 +1546,13 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i8_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i8_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512-NEXT:    retq
@@ -1563,7 +1563,7 @@ entry:
 
 define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_8i16_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1571,7 +1571,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_8i16_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1579,7 +1579,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_8i16_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -1587,7 +1587,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_8i16_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1595,12 +1595,12 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_8i16_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_8i16_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1611,7 +1611,7 @@ entry:
 
 define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_4i32_to_4i64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -1619,7 +1619,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_4i32_to_4i64:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movaps %xmm0, %xmm1
 ; SSSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -1627,7 +1627,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_4i32_to_4i64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
@@ -1635,7 +1635,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1643,12 +1643,12 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_4i32_to_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_4i32_to_4i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX512-NEXT:    retq
 entry:
@@ -1659,7 +1659,7 @@ entry:
 
 define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
 ; SSE2-LABEL: shuf_zext_8i8_to_8i32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    packuswb %xmm1, %xmm1
@@ -1671,7 +1671,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_8i8_to_8i32:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
@@ -1681,7 +1681,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_8i8_to_8i32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1690,7 +1690,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_8i8_to_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1699,13 +1699,13 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_8i8_to_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_8i8_to_8i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX512-NEXT:    retq
@@ -1717,7 +1717,7 @@ entry:
 
 define <2 x i64> @shuf_zext_16i8_to_2i64_offset6(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1725,18 +1725,18 @@ define <2 x i64> @shuf_zext_16i8_to_2i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    psrlq $48, %xmm0
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpsrlq $48, %xmm0, %xmm0
 ; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX-NEXT:    retq
@@ -1748,7 +1748,7 @@ entry:
 
 define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
@@ -1761,14 +1761,14 @@ define <4 x i64> @shuf_zext_16i8_to_4i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[11],zero,zero,zero,zero,zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[13],zero,zero,zero,zero,zero,zero,zero,xmm1[14],zero,zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    psrldq {{.*#+}} xmm1 = xmm1[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
@@ -1778,7 +1778,7 @@ define <4 x i64> @shuf_zext_16i8_to_4i64
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -1787,13 +1787,13 @@ define <4 x i64> @shuf_zext_16i8_to_4i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    retq
@@ -1805,7 +1805,7 @@ entry:
 
 define <2 x i64> @shuf_zext_8i16_to_2i64_offset6(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1813,18 +1813,18 @@ define <2 x i64> @shuf_zext_8i16_to_2i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[6,7],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX-NEXT:    retq
@@ -1836,7 +1836,7 @@ entry:
 
 define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1846,7 +1846,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1856,7 +1856,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
 ; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1865,7 +1865,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
 ; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1874,13 +1874,13 @@ define <4 x i64> @shuf_zext_8i16_to_4i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
 ; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX512-NEXT:    retq
@@ -1892,27 +1892,27 @@ entry:
 
 define <4 x i32> @shuf_zext_8i16_to_4i32_offset1(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; SSSE3-NEXT:    pxor %xmm1, %xmm1
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX-NEXT:    retq
@@ -1924,7 +1924,7 @@ entry:
 
 define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
@@ -1933,7 +1933,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
@@ -1942,7 +1942,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
@@ -1951,7 +1951,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1960,13 +1960,13 @@ define <8 x i32> @shuf_zext_8i16_to_8i32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512-NEXT:    retq
@@ -1978,7 +1978,7 @@ entry:
 
 define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1986,7 +1986,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    movdqa %xmm1, %xmm0
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1994,7 +1994,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i3
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
@@ -2003,7 +2003,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i3
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
@@ -2013,13 +2013,13 @@ define <8 x i32> @shuf_zext_16i16_to_8i3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512-NEXT:    retq
@@ -2031,13 +2031,13 @@ entry:
 
 define <2 x i64> @shuf_zext_4i32_to_2i64_offset2(<4 x i32> %A) nounwind uwtable readnone ssp {
 ; SSE-LABEL: shuf_zext_4i32_to_2i64_offset2:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    xorps %xmm1, %xmm1
 ; SSE-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuf_zext_4i32_to_2i64_offset2:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX-NEXT:    retq
@@ -2049,7 +2049,7 @@ entry:
 
 define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
 ; SSE2-NEXT:    pand %xmm1, %xmm0
@@ -2057,7 +2057,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSSE3:       # BB#0: # %entry
+; SSSE3:       # %bb.0: # %entry
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm1
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
 ; SSSE3-NEXT:    pand %xmm1, %xmm0
@@ -2065,7 +2065,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    pxor %xmm0, %xmm0
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
@@ -2073,7 +2073,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3]
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -2081,13 +2081,13 @@ define <4 x i64> @shuf_zext_4i32_to_4i64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
 ; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX512-NEXT:    retq
@@ -2099,7 +2099,7 @@ entry:
 
 define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
 ; SSE2-LABEL: zext_32i8_to_32i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2131,7 +2131,7 @@ define <32 x i32> @zext_32i8_to_32i32(<3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_32i8_to_32i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pxor %xmm2, %xmm2
 ; SSSE3-NEXT:    movdqa %xmm0, %xmm3
 ; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2163,7 +2163,7 @@ define <32 x i32> @zext_32i8_to_32i32(<3
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_32i8_to_32i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
 ; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
@@ -2190,7 +2190,7 @@ define <32 x i32> @zext_32i8_to_32i32(<3
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: zext_32i8_to_32i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -2214,7 +2214,7 @@ define <32 x i32> @zext_32i8_to_32i32(<3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_32i8_to_32i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -2230,7 +2230,7 @@ define <32 x i32> @zext_32i8_to_32i32(<3
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_32i8_to_32i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -2242,7 +2242,7 @@ define <32 x i32> @zext_32i8_to_32i32(<3
 
 define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
 ; SSE2-LABEL: zext_2i8_to_2i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movzwl (%rdi), %eax
 ; SSE2-NEXT:    movd %eax, %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
@@ -2253,7 +2253,7 @@ define <2 x i32> @zext_2i8_to_2i32(<2 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: zext_2i8_to_2i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movzwl (%rdi), %eax
 ; SSSE3-NEXT:    movd %eax, %xmm0
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2261,13 +2261,13 @@ define <2 x i32> @zext_2i8_to_2i32(<2 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: zext_2i8_to_2i32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE41-NEXT:    paddq %xmm0, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: zext_2i8_to_2i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-zmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-zmov.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-zmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-zmov.ll Mon Dec  4 09:18:51 2017
@@ -7,12 +7,12 @@
 
 define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
 ; SSE-LABEL: load_zmov_4i32_to_0zzz:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: load_zmov_4i32_to_0zzz:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 entry:
@@ -23,12 +23,12 @@ entry:
 
 define <2 x i64> @load_zmov_2i64_to_0z(<2 x i64> *%ptr) {
 ; SSE-LABEL: load_zmov_2i64_to_0z:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: load_zmov_2i64_to_0z:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 entry:

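An aside on the mechanics, since the remaining hunks are all the same rename: these
CHECK lines are autogenerated, and the only change is in the block comment, "BB#0"
becoming the MIR-style "%bb.0". As a minimal sketch of a test in the new format, with
a hypothetical function name that is not part of this patch, the pabsd lowering below
matches what the viabs.ll hunks check:

define <4 x i32> @example_abs_v4i32(<4 x i32> %a) nounwind {
; SSSE3-LABEL: example_abs_v4i32:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    pabsd %xmm0, %xmm0
; SSSE3-NEXT:    retq
  %neg = sub <4 x i32> zeroinitializer, %a
  %cmp = icmp sgt <4 x i32> %a, %neg
  %abs = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %neg
  ret <4 x i32> %abs
}
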
Modified: llvm/trunk/test/CodeGen/X86/viabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/viabs.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/viabs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/viabs.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define <4 x i32> @test_abs_gt_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psrad $31, %xmm1
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
@@ -16,22 +16,22 @@ define <4 x i32> @test_abs_gt_v4i32(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_gt_v4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_gt_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <4 x i32> zeroinitializer, %a
@@ -42,7 +42,7 @@ define <4 x i32> @test_abs_gt_v4i32(<4 x
 
 define <4 x i32> @test_abs_ge_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_ge_v4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psrad $31, %xmm1
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
@@ -50,22 +50,22 @@ define <4 x i32> @test_abs_ge_v4i32(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_ge_v4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_ge_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_ge_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_ge_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <4 x i32> zeroinitializer, %a
@@ -76,7 +76,7 @@ define <4 x i32> @test_abs_ge_v4i32(<4 x
 
 define <8 x i16> @test_abs_gt_v8i16(<8 x i16> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v8i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psraw $15, %xmm1
 ; SSE2-NEXT:    paddw %xmm1, %xmm0
@@ -84,27 +84,27 @@ define <8 x i16> @test_abs_gt_v8i16(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_gt_v8i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsw %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsw %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_abs_gt_v8i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpabsw %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
 ; AVX512F-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: test_abs_gt_v8i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpabsw %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
 ; AVX512BW-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <8 x i16> zeroinitializer, %a
@@ -115,7 +115,7 @@ define <8 x i16> @test_abs_gt_v8i16(<8 x
 
 define <16 x i8> @test_abs_lt_v16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_abs_lt_v16i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm1
 ; SSE2-NEXT:    paddb %xmm1, %xmm0
@@ -123,27 +123,27 @@ define <16 x i8> @test_abs_lt_v16i8(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_lt_v16i8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsb %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_lt_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_lt_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsb %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_abs_lt_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpabsb %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
 ; AVX512F-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: test_abs_lt_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpabsb %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
 ; AVX512BW-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <16 x i8> zeroinitializer, %a
@@ -154,7 +154,7 @@ define <16 x i8> @test_abs_lt_v16i8(<16
 
 define <4 x i32> @test_abs_le_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_le_v4i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psrad $31, %xmm1
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
@@ -162,22 +162,22 @@ define <4 x i32> @test_abs_le_v4i32(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_le_v4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <4 x i32> zeroinitializer, %a
@@ -188,7 +188,7 @@ define <4 x i32> @test_abs_le_v4i32(<4 x
 
 define <8 x i32> @test_abs_gt_v8i32(<8 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v8i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    psrad $31, %xmm2
 ; SSE2-NEXT:    paddd %xmm2, %xmm0
@@ -200,13 +200,13 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_gt_v8i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v8i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -214,12 +214,12 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v8i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_gt_v8i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <8 x i32> zeroinitializer, %a
@@ -230,7 +230,7 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x
 
 define <8 x i32> @test_abs_ge_v8i32(<8 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_ge_v8i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    psrad $31, %xmm2
 ; SSE2-NEXT:    paddd %xmm2, %xmm0
@@ -242,13 +242,13 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_ge_v8i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_ge_v8i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -256,12 +256,12 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_ge_v8i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_ge_v8i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <8 x i32> zeroinitializer, %a
@@ -272,7 +272,7 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x
 
 define <16 x i16> @test_abs_gt_v16i16(<16 x i16> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v16i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    psraw $15, %xmm2
 ; SSE2-NEXT:    paddw %xmm2, %xmm0
@@ -284,13 +284,13 @@ define <16 x i16> @test_abs_gt_v16i16(<1
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_gt_v16i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsw %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsw %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v16i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm0
@@ -298,17 +298,17 @@ define <16 x i16> @test_abs_gt_v16i16(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsw %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_abs_gt_v16i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpabsw %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
 ; AVX512F-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: test_abs_gt_v16i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpabsw %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
 ; AVX512BW-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <16 x i16> zeroinitializer, %a
@@ -319,7 +319,7 @@ define <16 x i16> @test_abs_gt_v16i16(<1
 
 define <32 x i8> @test_abs_lt_v32i8(<32 x i8> %a) nounwind {
 ; SSE2-LABEL: test_abs_lt_v32i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm3
@@ -331,13 +331,13 @@ define <32 x i8> @test_abs_lt_v32i8(<32
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_lt_v32i8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsb %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsb %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_lt_v32i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm0
@@ -345,17 +345,17 @@ define <32 x i8> @test_abs_lt_v32i8(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_lt_v32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsb %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_abs_lt_v32i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpabsb %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1c,0xc0]
 ; AVX512F-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: test_abs_lt_v32i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpabsb %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1c,0xc0]
 ; AVX512BW-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <32 x i8> zeroinitializer, %a
@@ -366,7 +366,7 @@ define <32 x i8> @test_abs_lt_v32i8(<32
 
 define <8 x i32> @test_abs_le_v8i32(<8 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_le_v8i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    psrad $31, %xmm2
 ; SSE2-NEXT:    paddd %xmm2, %xmm0
@@ -378,13 +378,13 @@ define <8 x i32> @test_abs_le_v8i32(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_le_v8i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_v8i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -392,12 +392,12 @@ define <8 x i32> @test_abs_le_v8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_v8i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_v8i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <8 x i32> zeroinitializer, %a
@@ -408,7 +408,7 @@ define <8 x i32> @test_abs_le_v8i32(<8 x
 
 define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_le_16i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; SSE2-NEXT:    psrad $31, %xmm4
 ; SSE2-NEXT:    paddd %xmm4, %xmm0
@@ -428,7 +428,7 @@ define <16 x i32> @test_abs_le_16i32(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_le_16i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsd %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsd %xmm1, %xmm1
 ; SSSE3-NEXT:    pabsd %xmm2, %xmm2
@@ -436,7 +436,7 @@ define <16 x i32> @test_abs_le_16i32(<16
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_16i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsd %xmm0, %xmm0
@@ -448,13 +448,13 @@ define <16 x i32> @test_abs_le_16i32(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_16i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsd %ymm0, %ymm0
 ; AVX2-NEXT:    vpabsd %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_16i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0x1e,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <16 x i32> zeroinitializer, %a
@@ -465,7 +465,7 @@ define <16 x i32> @test_abs_le_16i32(<16
 
 define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
 ; SSE-LABEL: test_abs_ge_v2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
@@ -474,7 +474,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_ge_v2i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
@@ -482,7 +482,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_ge_v2i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
 ; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
@@ -490,7 +490,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_ge_v2i64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsq %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x1f,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <2 x i64> zeroinitializer, %a
@@ -501,7 +501,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x
 
 define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
 ; SSE-LABEL: test_abs_gt_v4i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    psrad $31, %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
@@ -515,7 +515,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v4i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm3
@@ -528,7 +528,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v4i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
@@ -536,7 +536,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_gt_v4i64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsq %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x1f,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <4 x i64> zeroinitializer, %a
@@ -547,7 +547,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x
 
 define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
 ; SSE-LABEL: test_abs_le_v8i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm4
 ; SSE-NEXT:    psrad $31, %xmm4
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
@@ -571,7 +571,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_v8i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
@@ -592,7 +592,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_v8i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm2, %ymm3
 ; AVX2-NEXT:    vpaddq %ymm3, %ymm0, %ymm0
@@ -603,7 +603,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_v8i64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsq %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x1f,0xc0]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <8 x i64> zeroinitializer, %a
@@ -614,7 +614,7 @@ define <8 x i64> @test_abs_le_v8i64(<8 x
 
 define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
 ; SSE-LABEL: test_abs_le_v8i64_fold:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqu (%rdi), %xmm0
 ; SSE-NEXT:    movdqu 16(%rdi), %xmm1
 ; SSE-NEXT:    movdqu 32(%rdi), %xmm2
@@ -642,7 +642,7 @@ define <8 x i64> @test_abs_le_v8i64_fold
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_le_v8i64_fold:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX1-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -665,7 +665,7 @@ define <8 x i64> @test_abs_le_v8i64_fold
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_le_v8i64_fold:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX2-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
@@ -678,7 +678,7 @@ define <8 x i64> @test_abs_le_v8i64_fold
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_abs_le_v8i64_fold:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsq (%rdi), %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x1f,0x07]
 ; AVX512-NEXT:    retq # encoding: [0xc3]
   %a = load <8 x i64>, <8 x i64>* %a.ptr, align 8
@@ -690,7 +690,7 @@ define <8 x i64> @test_abs_le_v8i64_fold
 
 define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-LABEL: test_abs_lt_v64i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pxor %xmm5, %xmm5
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm5
@@ -710,7 +710,7 @@ define <64 x i8> @test_abs_lt_v64i8(<64
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_lt_v64i8:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsb %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsb %xmm1, %xmm1
 ; SSSE3-NEXT:    pabsb %xmm2, %xmm2
@@ -718,7 +718,7 @@ define <64 x i8> @test_abs_lt_v64i8(<64
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_lt_v64i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsb %xmm0, %xmm0
@@ -730,19 +730,19 @@ define <64 x i8> @test_abs_lt_v64i8(<64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_lt_v64i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsb %ymm0, %ymm0
 ; AVX2-NEXT:    vpabsb %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_abs_lt_v64i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpabsb %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1c,0xc0]
 ; AVX512F-NEXT:    vpabsb %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x7d,0x1c,0xc9]
 ; AVX512F-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: test_abs_lt_v64i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpabsb %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0x1c,0xc0]
 ; AVX512BW-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <64 x i8> zeroinitializer, %a
@@ -753,7 +753,7 @@ define <64 x i8> @test_abs_lt_v64i8(<64
 
 define <32 x i16> @test_abs_gt_v32i16(<32 x i16> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v32i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; SSE2-NEXT:    psraw $15, %xmm4
 ; SSE2-NEXT:    paddw %xmm4, %xmm0
@@ -773,7 +773,7 @@ define <32 x i16> @test_abs_gt_v32i16(<3
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: test_abs_gt_v32i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pabsw %xmm0, %xmm0
 ; SSSE3-NEXT:    pabsw %xmm1, %xmm1
 ; SSSE3-NEXT:    pabsw %xmm2, %xmm2
@@ -781,7 +781,7 @@ define <32 x i16> @test_abs_gt_v32i16(<3
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: test_abs_gt_v32i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpabsw %xmm0, %xmm0
@@ -793,19 +793,19 @@ define <32 x i16> @test_abs_gt_v32i16(<3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_abs_gt_v32i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpabsw %ymm0, %ymm0
 ; AVX2-NEXT:    vpabsw %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_abs_gt_v32i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpabsw %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
 ; AVX512F-NEXT:    vpabsw %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x7d,0x1d,0xc9]
 ; AVX512F-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: test_abs_gt_v32i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpabsw %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0x1d,0xc0]
 ; AVX512BW-NEXT:    retq # encoding: [0xc3]
   %tmp1neg = sub <32 x i16> zeroinitializer, %a

Modified: llvm/trunk/test/CodeGen/X86/vmovq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vmovq.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vmovq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vmovq.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
 ; SSE-LABEL: PR25554:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $1, %eax
 ; SSE-NEXT:    movq %rax, %xmm1
 ; SSE-NEXT:    por %xmm1, %xmm0
@@ -13,7 +13,7 @@ define <2 x i64> @PR25554(<2 x i64> %v0,
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: PR25554:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl $1, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm1
 ; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 declare i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
 define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
 ; CHECK-LABEL: test_vpshufbitqmb_128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovd %k0, %eax
@@ -17,7 +17,7 @@ define i16 @test_vpshufbitqmb_128(<16 x
 declare i32 @llvm.x86.avx512.mask.vpshufbitqmb.256(<32 x i8> %a, <32 x i8> %b, i32 %mask)
 define i32 @test_vpshufbitqmb_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
 ; CHECK-LABEL: test_vpshufbitqmb_256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vpshufbitqmb %ymm1, %ymm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovd %k0, %eax
@@ -30,7 +30,7 @@ define i32 @test_vpshufbitqmb_256(<32 x
 declare i64 @llvm.x86.avx512.mask.vpshufbitqmb.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
 define i64 @test_vpshufbitqmb_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
 ; CHECK-LABEL: test_vpshufbitqmb_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovq %rdi, %k1
 ; CHECK-NEXT:    vpshufbitqmb %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovq %k0, %rax

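The vselect-2.ll hunks below exercise select-with-constant-mask lowering to blend
instructions. As a second minimal sketch in the new comment format, again with a
hypothetical function name, the AVX path for a half-and-half float mask is a single
vblendpd, mirroring test3 below:

define <4 x float> @example_blend(<4 x float> %a, <4 x float> %b) {
; AVX-LABEL: example_blend:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT:    retq
  %sel = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
  ret <4 x float> %sel
}
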
Modified: llvm/trunk/test/CodeGen/X86/vselect-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-2.ll Mon Dec  4 09:18:51 2017
@@ -6,23 +6,23 @@
 
 define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: test1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX2-NEXT:    retq
   %select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B
@@ -31,22 +31,22 @@ define <4 x i32> @test1(<4 x i32> %A, <4
 
 define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: test2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test2:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
   %select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x i32> %A, <4 x i32> %B
@@ -55,18 +55,18 @@ define <4 x i32> @test2(<4 x i32> %A, <4
 
 define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
 ; SSE2-LABEL: test3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    retq
   %select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
@@ -75,17 +75,17 @@ define <4 x float> @test3(<4 x float> %A
 
 define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
 ; SSE2-LABEL: test4:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test4:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x float> %A, <4 x float> %B

Modified: llvm/trunk/test/CodeGen/X86/vselect-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-avx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-avx.ll Mon Dec  4 09:18:51 2017
@@ -17,7 +17,7 @@ target datalayout = "e-m:o-i64:64-f80:12
 
 define void @test(<4 x i16>* %a, <4 x i16>* %b) {
 ; AVX-LABEL: test:
-; AVX:       ## BB#0: ## %body
+; AVX:       ## %bb.0: ## %body
 ; AVX-NEXT:    movq {{.*}}(%rip), %rax
 ; AVX-NEXT:    movq %rax, (%rdi)
 ; AVX-NEXT:    movq {{.*}}(%rip), %rax
@@ -39,7 +39,7 @@ body:
 
 define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
 ; AVX1-LABEL: test2:
-; AVX1:       ## BB#0: ## %bb
+; AVX1:       ## %bb.0: ## %bb
 ; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
@@ -54,7 +54,7 @@ define void @test2(double** %call1559, i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test2:
-; AVX2:       ## BB#0: ## %bb
+; AVX2:       ## %bb.0: ## %bb
 ; AVX2-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
 ; AVX2-NEXT:    movq (%rdi,%rsi,8), %rax
@@ -84,7 +84,7 @@ bb:
 
 define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,  <4 x i16> %tmp3, <4 x i16> %tmp12) {
 ; AVX1-LABEL: test3:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
@@ -106,7 +106,7 @@ define void @test3(<4 x i32> %induction3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test3:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
@@ -142,7 +142,7 @@ define void @test3(<4 x i32> %induction3
 
 define <32 x i8> @PR22706(<32 x i1> %x) {
 ; AVX1-LABEL: PR22706:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
@@ -159,7 +159,7 @@ define <32 x i8> @PR22706(<32 x i1> %x)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR22706:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/vselect-constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-constants.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-constants.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-constants.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 
 define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_C1_or_C2_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -25,7 +25,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_C1_or_C2_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
 ; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
@@ -36,7 +36,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x
 
 define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_C1_or_C2_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
@@ -45,7 +45,7 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_C1_or_C2_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
 ; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
@@ -57,13 +57,13 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<
 
 define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_Cplus1_or_C_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_Cplus1_or_C_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -73,7 +73,7 @@ define <4 x i32> @sel_Cplus1_or_C_vec(<4
 
 define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
 ; SSE-NEXT:    psubd %xmm0, %xmm1
@@ -81,7 +81,7 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_ve
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
@@ -93,14 +93,14 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_ve
 
 define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_Cminus1_or_C_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_Cminus1_or_C_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
@@ -111,13 +111,13 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<
 
 define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -128,13 +128,13 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_v
 
 define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_minus1_or_0_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_minus1_or_0_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -144,12 +144,12 @@ define <4 x i32> @sel_minus1_or_0_vec(<4
 
 define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_minus1_or_0_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_minus1_or_0_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cond = icmp eq <4 x i32> %x, %y
@@ -159,14 +159,14 @@ define <4 x i32> @cmp_sel_minus1_or_0_ve
 
 define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_0_or_minus1_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_0_or_minus1_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -177,14 +177,14 @@ define <4 x i32> @sel_0_or_minus1_vec(<4
 
 define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_0_or_minus1_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; SSE-NEXT:    pxor %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_0_or_minus1_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
@@ -196,12 +196,12 @@ define <4 x i32> @cmp_sel_0_or_minus1_ve
 
 define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_1_or_0_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_1_or_0_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -210,13 +210,13 @@ define <4 x i32> @sel_1_or_0_vec(<4 x i1
 
 define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_1_or_0_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    psrld $31, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_1_or_0_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -227,12 +227,12 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4
 
 define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_0_or_1_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andnps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_0_or_1_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandnps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -241,13 +241,13 @@ define <4 x i32> @sel_0_or_1_vec(<4 x i1
 
 define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_0_or_1_vec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
 ; SSE-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_0_or_1_vec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpandn {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vselect-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-minmax.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-minmax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-minmax.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define <16 x i8> @test1(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test1:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -17,12 +17,12 @@ define <16 x i8> @test1(<16 x i8> %a, <1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test1:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test1:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -33,7 +33,7 @@ entry:
 
 define <16 x i8> @test2(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test2:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
@@ -45,12 +45,12 @@ define <16 x i8> @test2(<16 x i8> %a, <1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test2:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test2:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -61,7 +61,7 @@ entry:
 
 define <16 x i8> @test3(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test3:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -71,12 +71,12 @@ define <16 x i8> @test3(<16 x i8> %a, <1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test3:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test3:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -87,7 +87,7 @@ entry:
 
 define <16 x i8> @test4(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test4:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm3
 ; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -99,12 +99,12 @@ define <16 x i8> @test4(<16 x i8> %a, <1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test4:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test4:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -115,12 +115,12 @@ entry:
 
 define <16 x i8> @test5(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test5:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test5:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -131,12 +131,12 @@ entry:
 
 define <16 x i8> @test6(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test6:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test6:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -147,12 +147,12 @@ entry:
 
 define <16 x i8> @test7(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test7:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test7:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -163,12 +163,12 @@ entry:
 
 define <16 x i8> @test8(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -179,12 +179,12 @@ entry:
 
 define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test9:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test9:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -195,12 +195,12 @@ entry:
 
 define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test10:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test10:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -211,12 +211,12 @@ entry:
 
 define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test11:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test11:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -227,12 +227,12 @@ entry:
 
 define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test12:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test12:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -243,7 +243,7 @@ entry:
 
 define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test13:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -255,12 +255,12 @@ define <8 x i16> @test13(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test13:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test13:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -271,7 +271,7 @@ entry:
 
 define <8 x i16> @test14(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test14:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    psubusw %xmm1, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
@@ -282,12 +282,12 @@ define <8 x i16> @test14(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test14:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test14:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -298,7 +298,7 @@ entry:
 
 define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test15:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -310,12 +310,12 @@ define <8 x i16> @test15(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test15:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test15:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -326,7 +326,7 @@ entry:
 
 define <8 x i16> @test16(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    psubusw %xmm0, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
@@ -337,12 +337,12 @@ define <8 x i16> @test16(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test16:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -353,7 +353,7 @@ entry:
 
 define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test17:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -362,12 +362,12 @@ define <4 x i32> @test17(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test17:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test17:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -378,7 +378,7 @@ entry:
 
 define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test18:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
@@ -390,12 +390,12 @@ define <4 x i32> @test18(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test18:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test18:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -406,7 +406,7 @@ entry:
 
 define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test19:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm0
@@ -416,12 +416,12 @@ define <4 x i32> @test19(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test19:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test19:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -432,7 +432,7 @@ entry:
 
 define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test20:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -444,12 +444,12 @@ define <4 x i32> @test20(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test20:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test20:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -460,7 +460,7 @@ entry:
 
 define <4 x i32> @test21(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test21:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -472,12 +472,12 @@ define <4 x i32> @test21(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test21:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test21:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -488,7 +488,7 @@ entry:
 
 define <4 x i32> @test22(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test22:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm2
@@ -503,12 +503,12 @@ define <4 x i32> @test22(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test22:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test22:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -519,7 +519,7 @@ entry:
 
 define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test23:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -531,12 +531,12 @@ define <4 x i32> @test23(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test23:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test23:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -547,7 +547,7 @@ entry:
 
 define <4 x i32> @test24(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test24:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm2
@@ -562,12 +562,12 @@ define <4 x i32> @test24(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test24:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test24:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -578,7 +578,7 @@ entry:
 
 define <32 x i8> @test25(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test25:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
 ; SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -592,13 +592,13 @@ define <32 x i8> @test25(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test25:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm2, %xmm0
 ; SSE4-NEXT:    pminsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test25:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
@@ -607,12 +607,12 @@ define <32 x i8> @test25(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test25:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test25:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -623,7 +623,7 @@ entry:
 
 define <32 x i8> @test26(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test26:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm7, %xmm7
@@ -643,13 +643,13 @@ define <32 x i8> @test26(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test26:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm2, %xmm0
 ; SSE4-NEXT:    pminsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test26:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
@@ -658,12 +658,12 @@ define <32 x i8> @test26(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test26:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test26:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -674,7 +674,7 @@ entry:
 
 define <32 x i8> @test27(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test27:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm4
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
@@ -690,13 +690,13 @@ define <32 x i8> @test27(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test27:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test27:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
@@ -705,12 +705,12 @@ define <32 x i8> @test27(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test27:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test27:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -721,7 +721,7 @@ entry:
 
 define <32 x i8> @test28(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test28:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
@@ -741,13 +741,13 @@ define <32 x i8> @test28(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test28:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test28:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
@@ -756,12 +756,12 @@ define <32 x i8> @test28(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test28:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test28:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -772,13 +772,13 @@ entry:
 
 define <32 x i8> @test29(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test29:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm2, %xmm0
 ; SSE-NEXT:    pminub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test29:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
@@ -787,12 +787,12 @@ define <32 x i8> @test29(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test29:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test29:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -803,13 +803,13 @@ entry:
 
 define <32 x i8> @test30(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test30:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm2, %xmm0
 ; SSE-NEXT:    pminub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test30:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
@@ -818,12 +818,12 @@ define <32 x i8> @test30(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test30:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test30:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -834,13 +834,13 @@ entry:
 
 define <32 x i8> @test31(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test31:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE-NEXT:    pmaxub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test31:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
@@ -849,12 +849,12 @@ define <32 x i8> @test31(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test31:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test31:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -865,13 +865,13 @@ entry:
 
 define <32 x i8> @test32(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE-NEXT:    pmaxub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
@@ -880,12 +880,12 @@ define <32 x i8> @test32(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test32:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -896,13 +896,13 @@ entry:
 
 define <16 x i16> @test33(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test33:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm2, %xmm0
 ; SSE-NEXT:    pminsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test33:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
@@ -911,12 +911,12 @@ define <16 x i16> @test33(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test33:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test33:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -927,13 +927,13 @@ entry:
 
 define <16 x i16> @test34(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test34:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm2, %xmm0
 ; SSE-NEXT:    pminsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test34:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
@@ -942,12 +942,12 @@ define <16 x i16> @test34(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test34:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test34:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -958,13 +958,13 @@ entry:
 
 define <16 x i16> @test35(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test35:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test35:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
@@ -973,12 +973,12 @@ define <16 x i16> @test35(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test35:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test35:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -989,13 +989,13 @@ entry:
 
 define <16 x i16> @test36(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test36:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test36:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
@@ -1004,12 +1004,12 @@ define <16 x i16> @test36(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test36:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test36:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1020,7 +1020,7 @@ entry:
 
 define <16 x i16> @test37(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test37:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm5
 ; SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1040,13 +1040,13 @@ define <16 x i16> @test37(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test37:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm2, %xmm0
 ; SSE4-NEXT:    pminuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test37:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
@@ -1055,12 +1055,12 @@ define <16 x i16> @test37(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test37:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test37:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1071,7 +1071,7 @@ entry:
 
 define <16 x i16> @test38(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test38:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    psubusw %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm6
@@ -1090,13 +1090,13 @@ define <16 x i16> @test38(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test38:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm2, %xmm0
 ; SSE4-NEXT:    pminuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test38:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
@@ -1105,12 +1105,12 @@ define <16 x i16> @test38(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test38:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test38:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1121,7 +1121,7 @@ entry:
 
 define <16 x i16> @test39(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test39:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pxor %xmm5, %xmm6
@@ -1142,13 +1142,13 @@ define <16 x i16> @test39(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test39:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test39:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1157,12 +1157,12 @@ define <16 x i16> @test39(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test39:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test39:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1173,7 +1173,7 @@ entry:
 
 define <16 x i16> @test40(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test40:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    psubusw %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm5, %xmm5
@@ -1190,13 +1190,13 @@ define <16 x i16> @test40(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test40:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test40:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1205,12 +1205,12 @@ define <16 x i16> @test40(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test40:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test40:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1221,7 +1221,7 @@ entry:
 
 define <8 x i32> @test41(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test41:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
 ; SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -1235,13 +1235,13 @@ define <8 x i32> @test41(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test41:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm2, %xmm0
 ; SSE4-NEXT:    pminsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test41:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm2
@@ -1250,12 +1250,12 @@ define <8 x i32> @test41(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test41:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test41:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1266,7 +1266,7 @@ entry:
 
 define <8 x i32> @test42(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test42:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm7, %xmm7
@@ -1286,13 +1286,13 @@ define <8 x i32> @test42(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test42:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm2, %xmm0
 ; SSE4-NEXT:    pminsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test42:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm2
@@ -1301,12 +1301,12 @@ define <8 x i32> @test42(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test42:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test42:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1317,7 +1317,7 @@ entry:
 
 define <8 x i32> @test43(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test43:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
@@ -1333,13 +1333,13 @@ define <8 x i32> @test43(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test43:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test43:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1348,12 +1348,12 @@ define <8 x i32> @test43(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test43:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test43:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1364,7 +1364,7 @@ entry:
 
 define <8 x i32> @test44(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test44:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
@@ -1384,13 +1384,13 @@ define <8 x i32> @test44(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test44:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test44:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1399,12 +1399,12 @@ define <8 x i32> @test44(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test44:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test44:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1415,7 +1415,7 @@ entry:
 
 define <8 x i32> @test45(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test45:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm5
 ; SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1435,13 +1435,13 @@ define <8 x i32> @test45(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test45:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm2, %xmm0
 ; SSE4-NEXT:    pminud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test45:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2
@@ -1450,12 +1450,12 @@ define <8 x i32> @test45(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test45:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test45:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1466,7 +1466,7 @@ entry:
 
 define <8 x i32> @test46(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test46:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm4
@@ -1492,13 +1492,13 @@ define <8 x i32> @test46(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test46:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm2, %xmm0
 ; SSE4-NEXT:    pminud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test46:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2
@@ -1507,12 +1507,12 @@ define <8 x i32> @test46(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test46:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test46:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1523,7 +1523,7 @@ entry:
 
 define <8 x i32> @test47(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test47:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pxor %xmm5, %xmm6
@@ -1544,13 +1544,13 @@ define <8 x i32> @test47(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test47:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test47:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
@@ -1559,12 +1559,12 @@ define <8 x i32> @test47(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test47:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test47:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1575,7 +1575,7 @@ entry:
 
 define <8 x i32> @test48(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test48:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm4
@@ -1601,13 +1601,13 @@ define <8 x i32> @test48(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test48:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test48:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
@@ -1616,12 +1616,12 @@ define <8 x i32> @test48(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test48:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test48:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -1632,7 +1632,7 @@ entry:
 
 define <16 x i8> @test49(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test49:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm1
@@ -1642,12 +1642,12 @@ define <16 x i8> @test49(<16 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test49:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test49:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1658,7 +1658,7 @@ entry:
 
 define <16 x i8> @test50(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test50:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
@@ -1670,12 +1670,12 @@ define <16 x i8> @test50(<16 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test50:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test50:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1686,7 +1686,7 @@ entry:
 
 define <16 x i8> @test51(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test51:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm1
@@ -1696,12 +1696,12 @@ define <16 x i8> @test51(<16 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test51:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test51:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1712,7 +1712,7 @@ entry:
 
 define <16 x i8> @test52(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: test52:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm3
 ; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -1724,12 +1724,12 @@ define <16 x i8> @test52(<16 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test52:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test52:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1740,12 +1740,12 @@ entry:
 
 define <16 x i8> @test53(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test53:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test53:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1756,12 +1756,12 @@ entry:
 
 define <16 x i8> @test54(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test54:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test54:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1772,12 +1772,12 @@ entry:
 
 define <16 x i8> @test55(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test55:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test55:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1788,12 +1788,12 @@ entry:
 
 define <16 x i8> @test56(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: test56:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test56:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1804,12 +1804,12 @@ entry:
 
 define <8 x i16> @test57(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test57:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test57:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1820,12 +1820,12 @@ entry:
 
 define <8 x i16> @test58(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test58:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test58:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1836,12 +1836,12 @@ entry:
 
 define <8 x i16> @test59(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test59:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test59:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1852,12 +1852,12 @@ entry:
 
 define <8 x i16> @test60(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test60:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test60:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1868,7 +1868,7 @@ entry:
 
 define <8 x i16> @test61(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test61:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -1881,12 +1881,12 @@ define <8 x i16> @test61(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test61:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test61:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1897,7 +1897,7 @@ entry:
 
 define <8 x i16> @test62(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test62:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    psubusw %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
@@ -1909,12 +1909,12 @@ define <8 x i16> @test62(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test62:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test62:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1925,7 +1925,7 @@ entry:
 
 define <8 x i16> @test63(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test63:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -1938,12 +1938,12 @@ define <8 x i16> @test63(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test63:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test63:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1954,7 +1954,7 @@ entry:
 
 define <8 x i16> @test64(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    psubusw %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
@@ -1966,12 +1966,12 @@ define <8 x i16> @test64(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test64:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -1982,7 +1982,7 @@ entry:
 
 define <4 x i32> @test65(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test65:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm1
@@ -1992,12 +1992,12 @@ define <4 x i32> @test65(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test65:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test65:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2008,7 +2008,7 @@ entry:
 
 define <4 x i32> @test66(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test66:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
 ; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
@@ -2020,12 +2020,12 @@ define <4 x i32> @test66(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test66:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test66:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2036,7 +2036,7 @@ entry:
 
 define <4 x i32> @test67(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test67:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
 ; SSE2-NEXT:    pand %xmm2, %xmm1
@@ -2046,12 +2046,12 @@ define <4 x i32> @test67(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test67:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test67:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2062,7 +2062,7 @@ entry:
 
 define <4 x i32> @test68(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test68:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
 ; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -2074,12 +2074,12 @@ define <4 x i32> @test68(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test68:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test68:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2090,7 +2090,7 @@ entry:
 
 define <4 x i32> @test69(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test69:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -2103,12 +2103,12 @@ define <4 x i32> @test69(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test69:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test69:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2119,7 +2119,7 @@ entry:
 
 define <4 x i32> @test70(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test70:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm2
@@ -2134,12 +2134,12 @@ define <4 x i32> @test70(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test70:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test70:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2150,7 +2150,7 @@ entry:
 
 define <4 x i32> @test71(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test71:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -2163,12 +2163,12 @@ define <4 x i32> @test71(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test71:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test71:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2179,7 +2179,7 @@ entry:
 
 define <4 x i32> @test72(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test72:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm2
@@ -2194,12 +2194,12 @@ define <4 x i32> @test72(<4 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test72:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm1, %xmm0
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: test72:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -2210,7 +2210,7 @@ entry:
 
 define <32 x i8> @test73(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test73:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
 ; SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -2226,13 +2226,13 @@ define <32 x i8> @test73(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test73:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test73:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
@@ -2241,12 +2241,12 @@ define <32 x i8> @test73(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test73:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test73:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2257,7 +2257,7 @@ entry:
 
 define <32 x i8> @test74(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test74:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm7, %xmm7
@@ -2277,13 +2277,13 @@ define <32 x i8> @test74(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test74:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test74:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
@@ -2292,12 +2292,12 @@ define <32 x i8> @test74(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test74:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test74:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2308,7 +2308,7 @@ entry:
 
 define <32 x i8> @test75(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test75:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm4
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
@@ -2324,13 +2324,13 @@ define <32 x i8> @test75(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test75:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm2, %xmm0
 ; SSE4-NEXT:    pminsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test75:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
@@ -2339,12 +2339,12 @@ define <32 x i8> @test75(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test75:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test75:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2355,7 +2355,7 @@ entry:
 
 define <32 x i8> @test76(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-LABEL: test76:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
@@ -2375,13 +2375,13 @@ define <32 x i8> @test76(<32 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test76:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm2, %xmm0
 ; SSE4-NEXT:    pminsb %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test76:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
@@ -2390,12 +2390,12 @@ define <32 x i8> @test76(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test76:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test76:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2406,13 +2406,13 @@ entry:
 
 define <32 x i8> @test77(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test77:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE-NEXT:    pmaxub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test77:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
@@ -2421,12 +2421,12 @@ define <32 x i8> @test77(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test77:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test77:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2437,13 +2437,13 @@ entry:
 
 define <32 x i8> @test78(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test78:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm2, %xmm0
 ; SSE-NEXT:    pmaxub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test78:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
@@ -2452,12 +2452,12 @@ define <32 x i8> @test78(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test78:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test78:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2468,13 +2468,13 @@ entry:
 
 define <32 x i8> @test79(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test79:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm2, %xmm0
 ; SSE-NEXT:    pminub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test79:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
@@ -2483,12 +2483,12 @@ define <32 x i8> @test79(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test79:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test79:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2499,13 +2499,13 @@ entry:
 
 define <32 x i8> @test80(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: test80:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm2, %xmm0
 ; SSE-NEXT:    pminub %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test80:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
@@ -2514,12 +2514,12 @@ define <32 x i8> @test80(<32 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test80:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test80:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2530,13 +2530,13 @@ entry:
 
 define <16 x i16> @test81(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test81:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test81:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
@@ -2545,12 +2545,12 @@ define <16 x i16> @test81(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test81:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test81:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2561,13 +2561,13 @@ entry:
 
 define <16 x i16> @test82(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test82:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm2, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test82:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
@@ -2576,12 +2576,12 @@ define <16 x i16> @test82(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test82:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test82:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2592,13 +2592,13 @@ entry:
 
 define <16 x i16> @test83(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test83:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm2, %xmm0
 ; SSE-NEXT:    pminsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test83:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
@@ -2607,12 +2607,12 @@ define <16 x i16> @test83(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test83:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test83:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2623,13 +2623,13 @@ entry:
 
 define <16 x i16> @test84(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: test84:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm2, %xmm0
 ; SSE-NEXT:    pminsw %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test84:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
@@ -2638,12 +2638,12 @@ define <16 x i16> @test84(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test84:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test84:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2654,7 +2654,7 @@ entry:
 
 define <16 x i16> @test85(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test85:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm6
 ; SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -2676,13 +2676,13 @@ define <16 x i16> @test85(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test85:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test85:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
@@ -2691,12 +2691,12 @@ define <16 x i16> @test85(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test85:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test85:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2707,7 +2707,7 @@ entry:
 
 define <16 x i16> @test86(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test86:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    psubusw %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm6
@@ -2726,13 +2726,13 @@ define <16 x i16> @test86(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test86:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test86:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
@@ -2741,12 +2741,12 @@ define <16 x i16> @test86(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test86:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test86:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2757,7 +2757,7 @@ entry:
 
 define <16 x i16> @test87(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test87:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -2779,13 +2779,13 @@ define <16 x i16> @test87(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test87:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm2, %xmm0
 ; SSE4-NEXT:    pminuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test87:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
@@ -2794,12 +2794,12 @@ define <16 x i16> @test87(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test87:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test87:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2810,7 +2810,7 @@ entry:
 
 define <16 x i16> @test88(<16 x i16> %a, <16 x i16> %b) {
 ; SSE2-LABEL: test88:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    psubusw %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm6
@@ -2829,13 +2829,13 @@ define <16 x i16> @test88(<16 x i16> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test88:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm2, %xmm0
 ; SSE4-NEXT:    pminuw %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test88:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
@@ -2844,12 +2844,12 @@ define <16 x i16> @test88(<16 x i16> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test88:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test88:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2860,7 +2860,7 @@ entry:
 
 define <8 x i32> @test89(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test89:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
 ; SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -2876,13 +2876,13 @@ define <8 x i32> @test89(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test89:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test89:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
@@ -2891,12 +2891,12 @@ define <8 x i32> @test89(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test89:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test89:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2907,7 +2907,7 @@ entry:
 
 define <8 x i32> @test90(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test90:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm7, %xmm7
@@ -2927,13 +2927,13 @@ define <8 x i32> @test90(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test90:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test90:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
@@ -2942,12 +2942,12 @@ define <8 x i32> @test90(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test90:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test90:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -2958,7 +2958,7 @@ entry:
 
 define <8 x i32> @test91(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test91:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
@@ -2974,13 +2974,13 @@ define <8 x i32> @test91(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test91:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm2, %xmm0
 ; SSE4-NEXT:    pminsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test91:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm2
@@ -2989,12 +2989,12 @@ define <8 x i32> @test91(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test91:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test91:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -3005,7 +3005,7 @@ entry:
 
 define <8 x i32> @test92(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test92:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pcmpgtd %xmm1, %xmm6
 ; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
@@ -3025,13 +3025,13 @@ define <8 x i32> @test92(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test92:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm2, %xmm0
 ; SSE4-NEXT:    pminsd %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test92:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm2
@@ -3040,12 +3040,12 @@ define <8 x i32> @test92(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test92:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test92:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -3056,7 +3056,7 @@ entry:
 
 define <8 x i32> @test93(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test93:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm6
 ; SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -3078,13 +3078,13 @@ define <8 x i32> @test93(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test93:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test93:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
@@ -3093,12 +3093,12 @@ define <8 x i32> @test93(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test93:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test93:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -3109,7 +3109,7 @@ entry:
 
 define <8 x i32> @test94(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test94:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm4
@@ -3135,13 +3135,13 @@ define <8 x i32> @test94(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test94:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm2, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test94:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
@@ -3150,12 +3150,12 @@ define <8 x i32> @test94(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test94:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test94:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -3166,7 +3166,7 @@ entry:
 
 define <8 x i32> @test95(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test95:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm6
 ; SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -3188,13 +3188,13 @@ define <8 x i32> @test95(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test95:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm2, %xmm0
 ; SSE4-NEXT:    pminud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test95:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2
@@ -3203,12 +3203,12 @@ define <8 x i32> @test95(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test95:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test95:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -3219,7 +3219,7 @@ entry:
 
 define <8 x i32> @test96(<8 x i32> %a, <8 x i32> %b) {
 ; SSE2-LABEL: test96:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm6, %xmm4
@@ -3245,13 +3245,13 @@ define <8 x i32> @test96(<8 x i32> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test96:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm2, %xmm0
 ; SSE4-NEXT:    pminud %xmm3, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test96:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2
@@ -3260,12 +3260,12 @@ define <8 x i32> @test96(<8 x i32> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test96:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test96:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -3278,7 +3278,7 @@ entry:
 
 define <64 x i8> @test97(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test97:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm6, %xmm9
@@ -3302,7 +3302,7 @@ define <64 x i8> @test97(<64 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test97:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm4, %xmm0
 ; SSE4-NEXT:    pminsb %xmm5, %xmm1
 ; SSE4-NEXT:    pminsb %xmm6, %xmm2
@@ -3310,7 +3310,7 @@ define <64 x i8> @test97(<64 x i8> %a, <
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test97:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsb %xmm4, %xmm5, %xmm4
@@ -3324,13 +3324,13 @@ define <64 x i8> @test97(<64 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test97:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test97:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3341,7 +3341,7 @@ entry:
 
 define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test98:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm8, %xmm12
@@ -3377,7 +3377,7 @@ define <64 x i8> @test98(<64 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test98:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm4, %xmm0
 ; SSE4-NEXT:    pminsb %xmm5, %xmm1
 ; SSE4-NEXT:    pminsb %xmm6, %xmm2
@@ -3385,7 +3385,7 @@ define <64 x i8> @test98(<64 x i8> %a, <
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test98:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsb %xmm4, %xmm5, %xmm4
@@ -3399,13 +3399,13 @@ define <64 x i8> @test98(<64 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test98:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test98:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3416,7 +3416,7 @@ entry:
 
 define <64 x i8> @test99(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test99:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pcmpgtb %xmm7, %xmm3
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
@@ -3443,7 +3443,7 @@ define <64 x i8> @test99(<64 x i8> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test99:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsb %xmm6, %xmm2
@@ -3451,7 +3451,7 @@ define <64 x i8> @test99(<64 x i8> %a, <
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test99:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsb %xmm4, %xmm5, %xmm4
@@ -3465,13 +3465,13 @@ define <64 x i8> @test99(<64 x i8> %a, <
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test99:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test99:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3482,7 +3482,7 @@ entry:
 
 define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test100:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
@@ -3518,7 +3518,7 @@ define <64 x i8> @test100(<64 x i8> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test100:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsb %xmm6, %xmm2
@@ -3526,7 +3526,7 @@ define <64 x i8> @test100(<64 x i8> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test100:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsb %xmm4, %xmm5, %xmm4
@@ -3540,13 +3540,13 @@ define <64 x i8> @test100(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test100:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test100:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3557,7 +3557,7 @@ entry:
 
 define <64 x i8> @test101(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm4, %xmm0
 ; SSE-NEXT:    pminub %xmm5, %xmm1
 ; SSE-NEXT:    pminub %xmm6, %xmm2
@@ -3565,7 +3565,7 @@ define <64 x i8> @test101(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminub %xmm4, %xmm5, %xmm4
@@ -3579,13 +3579,13 @@ define <64 x i8> @test101(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test101:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3596,7 +3596,7 @@ entry:
 
 define <64 x i8> @test102(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test102:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm4, %xmm0
 ; SSE-NEXT:    pminub %xmm5, %xmm1
 ; SSE-NEXT:    pminub %xmm6, %xmm2
@@ -3604,7 +3604,7 @@ define <64 x i8> @test102(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test102:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminub %xmm4, %xmm5, %xmm4
@@ -3618,13 +3618,13 @@ define <64 x i8> @test102(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test102:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test102:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3635,7 +3635,7 @@ entry:
 
 define <64 x i8> @test103(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test103:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm4, %xmm0
 ; SSE-NEXT:    pmaxub %xmm5, %xmm1
 ; SSE-NEXT:    pmaxub %xmm6, %xmm2
@@ -3643,7 +3643,7 @@ define <64 x i8> @test103(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test103:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxub %xmm4, %xmm5, %xmm4
@@ -3657,13 +3657,13 @@ define <64 x i8> @test103(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test103:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test103:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3674,7 +3674,7 @@ entry:
 
 define <64 x i8> @test104(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test104:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm4, %xmm0
 ; SSE-NEXT:    pmaxub %xmm5, %xmm1
 ; SSE-NEXT:    pmaxub %xmm6, %xmm2
@@ -3682,7 +3682,7 @@ define <64 x i8> @test104(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test104:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxub %xmm4, %xmm5, %xmm4
@@ -3696,13 +3696,13 @@ define <64 x i8> @test104(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test104:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test104:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3713,7 +3713,7 @@ entry:
 
 define <32 x i16> @test105(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test105:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm4, %xmm0
 ; SSE-NEXT:    pminsw %xmm5, %xmm1
 ; SSE-NEXT:    pminsw %xmm6, %xmm2
@@ -3721,7 +3721,7 @@ define <32 x i16> @test105(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test105:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsw %xmm4, %xmm5, %xmm4
@@ -3735,13 +3735,13 @@ define <32 x i16> @test105(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test105:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test105:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3752,7 +3752,7 @@ entry:
 
 define <32 x i16> @test106(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test106:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm4, %xmm0
 ; SSE-NEXT:    pminsw %xmm5, %xmm1
 ; SSE-NEXT:    pminsw %xmm6, %xmm2
@@ -3760,7 +3760,7 @@ define <32 x i16> @test106(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test106:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsw %xmm4, %xmm5, %xmm4
@@ -3774,13 +3774,13 @@ define <32 x i16> @test106(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test106:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test106:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3791,7 +3791,7 @@ entry:
 
 define <32 x i16> @test107(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test107:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm4, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm5, %xmm1
 ; SSE-NEXT:    pmaxsw %xmm6, %xmm2
@@ -3799,7 +3799,7 @@ define <32 x i16> @test107(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test107:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsw %xmm4, %xmm5, %xmm4
@@ -3813,13 +3813,13 @@ define <32 x i16> @test107(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test107:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test107:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3830,7 +3830,7 @@ entry:
 
 define <32 x i16> @test108(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test108:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm4, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm5, %xmm1
 ; SSE-NEXT:    pmaxsw %xmm6, %xmm2
@@ -3838,7 +3838,7 @@ define <32 x i16> @test108(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test108:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsw %xmm4, %xmm5, %xmm4
@@ -3852,13 +3852,13 @@ define <32 x i16> @test108(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test108:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test108:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3869,7 +3869,7 @@ entry:
 
 define <32 x i16> @test109(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test109:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm10 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm9
 ; SSE2-NEXT:    pxor %xmm10, %xmm9
@@ -3905,7 +3905,7 @@ define <32 x i16> @test109(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test109:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm4, %xmm0
 ; SSE4-NEXT:    pminuw %xmm5, %xmm1
 ; SSE4-NEXT:    pminuw %xmm6, %xmm2
@@ -3913,7 +3913,7 @@ define <32 x i16> @test109(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test109:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
@@ -3927,13 +3927,13 @@ define <32 x i16> @test109(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test109:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test109:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -3944,7 +3944,7 @@ entry:
 
 define <32 x i16> @test110(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test110:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm1, %xmm10
@@ -3974,7 +3974,7 @@ define <32 x i16> @test110(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test110:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm4, %xmm0
 ; SSE4-NEXT:    pminuw %xmm5, %xmm1
 ; SSE4-NEXT:    pminuw %xmm6, %xmm2
@@ -3982,7 +3982,7 @@ define <32 x i16> @test110(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test110:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
@@ -3996,13 +3996,13 @@ define <32 x i16> @test110(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test110:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test110:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -4013,7 +4013,7 @@ entry:
 
 define <32 x i16> @test111(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test111:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm9
 ; SSE2-NEXT:    pxor %xmm11, %xmm9
@@ -4052,7 +4052,7 @@ define <32 x i16> @test111(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test111:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxuw %xmm6, %xmm2
@@ -4060,7 +4060,7 @@ define <32 x i16> @test111(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test111:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxuw %xmm4, %xmm5, %xmm4
@@ -4074,13 +4074,13 @@ define <32 x i16> @test111(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test111:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test111:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -4091,7 +4091,7 @@ entry:
 
 define <32 x i16> @test112(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test112:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    psubusw %xmm3, %xmm8
 ; SSE2-NEXT:    pxor %xmm9, %xmm9
@@ -4120,7 +4120,7 @@ define <32 x i16> @test112(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test112:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxuw %xmm6, %xmm2
@@ -4128,7 +4128,7 @@ define <32 x i16> @test112(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test112:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxuw %xmm4, %xmm5, %xmm4
@@ -4142,13 +4142,13 @@ define <32 x i16> @test112(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test112:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test112:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -4159,7 +4159,7 @@ entry:
 
 define <16 x i32> @test113(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test113:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm6, %xmm9
@@ -4183,7 +4183,7 @@ define <16 x i32> @test113(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test113:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm4, %xmm0
 ; SSE4-NEXT:    pminsd %xmm5, %xmm1
 ; SSE4-NEXT:    pminsd %xmm6, %xmm2
@@ -4191,7 +4191,7 @@ define <16 x i32> @test113(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test113:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsd %xmm4, %xmm5, %xmm4
@@ -4205,13 +4205,13 @@ define <16 x i32> @test113(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test113:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test113:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4222,7 +4222,7 @@ entry:
 
 define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test114:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm8, %xmm12
@@ -4258,7 +4258,7 @@ define <16 x i32> @test114(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test114:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm4, %xmm0
 ; SSE4-NEXT:    pminsd %xmm5, %xmm1
 ; SSE4-NEXT:    pminsd %xmm6, %xmm2
@@ -4266,7 +4266,7 @@ define <16 x i32> @test114(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test114:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsd %xmm4, %xmm5, %xmm4
@@ -4280,13 +4280,13 @@ define <16 x i32> @test114(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test114:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test114:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4297,7 +4297,7 @@ entry:
 
 define <16 x i32> @test115(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test115:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pcmpgtd %xmm7, %xmm3
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
@@ -4324,7 +4324,7 @@ define <16 x i32> @test115(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test115:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsd %xmm6, %xmm2
@@ -4332,7 +4332,7 @@ define <16 x i32> @test115(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test115:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsd %xmm4, %xmm5, %xmm4
@@ -4346,13 +4346,13 @@ define <16 x i32> @test115(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test115:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test115:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4363,7 +4363,7 @@ entry:
 
 define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test116:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
@@ -4399,7 +4399,7 @@ define <16 x i32> @test116(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test116:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsd %xmm6, %xmm2
@@ -4407,7 +4407,7 @@ define <16 x i32> @test116(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test116:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsd %xmm4, %xmm5, %xmm4
@@ -4421,13 +4421,13 @@ define <16 x i32> @test116(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test116:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test116:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4438,7 +4438,7 @@ entry:
 
 define <16 x i32> @test117(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test117:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm9
 ; SSE2-NEXT:    pxor %xmm10, %xmm9
@@ -4474,7 +4474,7 @@ define <16 x i32> @test117(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test117:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm4, %xmm0
 ; SSE4-NEXT:    pminud %xmm5, %xmm1
 ; SSE4-NEXT:    pminud %xmm6, %xmm2
@@ -4482,7 +4482,7 @@ define <16 x i32> @test117(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test117:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminud %xmm4, %xmm5, %xmm4
@@ -4496,13 +4496,13 @@ define <16 x i32> @test117(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test117:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test117:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4513,7 +4513,7 @@ entry:
 
 define <16 x i32> @test118(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test118:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm0
@@ -4561,7 +4561,7 @@ define <16 x i32> @test118(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test118:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm4, %xmm0
 ; SSE4-NEXT:    pminud %xmm5, %xmm1
 ; SSE4-NEXT:    pminud %xmm6, %xmm2
@@ -4569,7 +4569,7 @@ define <16 x i32> @test118(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test118:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminud %xmm4, %xmm5, %xmm4
@@ -4583,13 +4583,13 @@ define <16 x i32> @test118(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test118:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test118:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4600,7 +4600,7 @@ entry:
 
 define <16 x i32> @test119(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test119:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm9
 ; SSE2-NEXT:    pxor %xmm11, %xmm9
@@ -4639,7 +4639,7 @@ define <16 x i32> @test119(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test119:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxud %xmm6, %xmm2
@@ -4647,7 +4647,7 @@ define <16 x i32> @test119(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test119:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxud %xmm4, %xmm5, %xmm4
@@ -4661,13 +4661,13 @@ define <16 x i32> @test119(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test119:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test119:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4678,7 +4678,7 @@ entry:
 
 define <16 x i32> @test120(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test120:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm0
@@ -4726,7 +4726,7 @@ define <16 x i32> @test120(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test120:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxud %xmm6, %xmm2
@@ -4734,7 +4734,7 @@ define <16 x i32> @test120(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test120:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxud %xmm4, %xmm5, %xmm4
@@ -4748,13 +4748,13 @@ define <16 x i32> @test120(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test120:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test120:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4765,7 +4765,7 @@ entry:
 
 define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test121:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pxor %xmm9, %xmm8
@@ -4829,7 +4829,7 @@ define <8 x i64> @test121(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test121:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm7, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm9
@@ -4853,7 +4853,7 @@ define <8 x i64> @test121(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test121:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -4869,7 +4869,7 @@ define <8 x i64> @test121(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test121:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm3, %ymm4
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm2, %ymm5
 ; AVX2-NEXT:    vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
@@ -4877,7 +4877,7 @@ define <8 x i64> @test121(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test121:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -4888,7 +4888,7 @@ entry:
 
 define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test122:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -4969,7 +4969,7 @@ define <8 x i64> @test122(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test122:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm3, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm7, %xmm9
@@ -4997,7 +4997,7 @@ define <8 x i64> @test122(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test122:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -5018,7 +5018,7 @@ define <8 x i64> @test122(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test122:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm1, %ymm4
 ; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
 ; AVX2-NEXT:    vpxor %ymm5, %ymm4, %ymm4
@@ -5029,7 +5029,7 @@ define <8 x i64> @test122(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test122:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5040,7 +5040,7 @@ entry:
 
 define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test123:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    pxor %xmm9, %xmm8
@@ -5104,7 +5104,7 @@ define <8 x i64> @test123(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test123:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm3, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm7, %xmm9
@@ -5127,7 +5127,7 @@ define <8 x i64> @test123(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -5143,7 +5143,7 @@ define <8 x i64> @test123(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm1, %ymm4
 ; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm5
 ; AVX2-NEXT:    vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
@@ -5151,7 +5151,7 @@ define <8 x i64> @test123(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test123:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5162,7 +5162,7 @@ entry:
 
 define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test124:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm11
 ; SSE2-NEXT:    movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -5244,7 +5244,7 @@ define <8 x i64> @test124(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test124:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm7, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm9
@@ -5273,7 +5273,7 @@ define <8 x i64> @test124(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test124:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -5294,7 +5294,7 @@ define <8 x i64> @test124(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test124:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm3, %ymm4
 ; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
 ; AVX2-NEXT:    vpxor %ymm5, %ymm4, %ymm4
@@ -5305,7 +5305,7 @@ define <8 x i64> @test124(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test124:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5316,7 +5316,7 @@ entry:
 
 define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test125:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pxor %xmm9, %xmm8
@@ -5380,7 +5380,7 @@ define <8 x i64> @test125(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test125:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm10
@@ -5416,7 +5416,7 @@ define <8 x i64> @test125(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test125:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -5441,7 +5441,7 @@ define <8 x i64> @test125(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test125:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm6
@@ -5454,7 +5454,7 @@ define <8 x i64> @test125(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test125:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5465,7 +5465,7 @@ entry:
 
 define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test126:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -5546,7 +5546,7 @@ define <8 x i64> @test126(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test126:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm9
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm7, %xmm10
@@ -5587,7 +5587,7 @@ define <8 x i64> @test126(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test126:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -5617,7 +5617,7 @@ define <8 x i64> @test126(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test126:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm6
@@ -5633,7 +5633,7 @@ define <8 x i64> @test126(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test126:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5644,7 +5644,7 @@ entry:
 
 define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test127:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    pxor %xmm9, %xmm8
@@ -5708,7 +5708,7 @@ define <8 x i64> @test127(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test127:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm7, %xmm10
@@ -5744,7 +5744,7 @@ define <8 x i64> @test127(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test127:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -5769,7 +5769,7 @@ define <8 x i64> @test127(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test127:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm6
@@ -5782,7 +5782,7 @@ define <8 x i64> @test127(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test127:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5793,7 +5793,7 @@ entry:
 
 define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test128:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm11
 ; SSE2-NEXT:    movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -5875,7 +5875,7 @@ define <8 x i64> @test128(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test128:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm9
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm10
@@ -5916,7 +5916,7 @@ define <8 x i64> @test128(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test128:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -5946,7 +5946,7 @@ define <8 x i64> @test128(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test128:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm6
@@ -5962,7 +5962,7 @@ define <8 x i64> @test128(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test128:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -5973,7 +5973,7 @@ entry:
 
 define <64 x i8> @test129(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test129:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm7, %xmm3
 ; SSE2-NEXT:    pcmpgtb %xmm8, %xmm3
@@ -6001,7 +6001,7 @@ define <64 x i8> @test129(<64 x i8> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test129:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsb %xmm6, %xmm2
@@ -6009,7 +6009,7 @@ define <64 x i8> @test129(<64 x i8> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test129:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsb %xmm4, %xmm5, %xmm4
@@ -6023,13 +6023,13 @@ define <64 x i8> @test129(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test129:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test129:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6040,7 +6040,7 @@ entry:
 
 define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test130:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm2, %xmm8
 ; SSE2-NEXT:    movdqa %xmm3, %xmm12
 ; SSE2-NEXT:    pcmpgtb %xmm7, %xmm12
@@ -6076,7 +6076,7 @@ define <64 x i8> @test130(<64 x i8> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test130:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsb %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsb %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsb %xmm6, %xmm2
@@ -6084,7 +6084,7 @@ define <64 x i8> @test130(<64 x i8> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test130:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsb %xmm4, %xmm5, %xmm4
@@ -6098,13 +6098,13 @@ define <64 x i8> @test130(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test130:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test130:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6115,7 +6115,7 @@ entry:
 
 define <64 x i8> @test131(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test131:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pcmpgtb %xmm7, %xmm3
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
@@ -6142,7 +6142,7 @@ define <64 x i8> @test131(<64 x i8> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test131:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm4, %xmm0
 ; SSE4-NEXT:    pminsb %xmm5, %xmm1
 ; SSE4-NEXT:    pminsb %xmm6, %xmm2
@@ -6150,7 +6150,7 @@ define <64 x i8> @test131(<64 x i8> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test131:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsb %xmm4, %xmm5, %xmm4
@@ -6164,13 +6164,13 @@ define <64 x i8> @test131(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test131:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test131:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6181,7 +6181,7 @@ entry:
 
 define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-LABEL: test132:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm2, %xmm8
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    movdqa %xmm7, %xmm12
@@ -6217,7 +6217,7 @@ define <64 x i8> @test132(<64 x i8> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test132:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsb %xmm4, %xmm0
 ; SSE4-NEXT:    pminsb %xmm5, %xmm1
 ; SSE4-NEXT:    pminsb %xmm6, %xmm2
@@ -6225,7 +6225,7 @@ define <64 x i8> @test132(<64 x i8> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test132:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsb %xmm4, %xmm5, %xmm4
@@ -6239,13 +6239,13 @@ define <64 x i8> @test132(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test132:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test132:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6256,7 +6256,7 @@ entry:
 
 define <64 x i8> @test133(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test133:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm4, %xmm0
 ; SSE-NEXT:    pmaxub %xmm5, %xmm1
 ; SSE-NEXT:    pmaxub %xmm6, %xmm2
@@ -6264,7 +6264,7 @@ define <64 x i8> @test133(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test133:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxub %xmm4, %xmm5, %xmm4
@@ -6278,13 +6278,13 @@ define <64 x i8> @test133(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test133:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test133:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6295,7 +6295,7 @@ entry:
 
 define <64 x i8> @test134(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test134:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxub %xmm4, %xmm0
 ; SSE-NEXT:    pmaxub %xmm5, %xmm1
 ; SSE-NEXT:    pmaxub %xmm6, %xmm2
@@ -6303,7 +6303,7 @@ define <64 x i8> @test134(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test134:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxub %xmm4, %xmm5, %xmm4
@@ -6317,13 +6317,13 @@ define <64 x i8> @test134(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test134:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test134:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6334,7 +6334,7 @@ entry:
 
 define <64 x i8> @test135(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test135:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm4, %xmm0
 ; SSE-NEXT:    pminub %xmm5, %xmm1
 ; SSE-NEXT:    pminub %xmm6, %xmm2
@@ -6342,7 +6342,7 @@ define <64 x i8> @test135(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test135:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminub %xmm4, %xmm5, %xmm4
@@ -6356,13 +6356,13 @@ define <64 x i8> @test135(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test135:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test135:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6373,7 +6373,7 @@ entry:
 
 define <64 x i8> @test136(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-LABEL: test136:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminub %xmm4, %xmm0
 ; SSE-NEXT:    pminub %xmm5, %xmm1
 ; SSE-NEXT:    pminub %xmm6, %xmm2
@@ -6381,7 +6381,7 @@ define <64 x i8> @test136(<64 x i8> %a,
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test136:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminub %xmm4, %xmm5, %xmm4
@@ -6395,13 +6395,13 @@ define <64 x i8> @test136(<64 x i8> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test136:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminub %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminub %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test136:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminub %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6412,7 +6412,7 @@ entry:
 
 define <32 x i16> @test137(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test137:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm4, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm5, %xmm1
 ; SSE-NEXT:    pmaxsw %xmm6, %xmm2
@@ -6420,7 +6420,7 @@ define <32 x i16> @test137(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test137:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsw %xmm4, %xmm5, %xmm4
@@ -6434,13 +6434,13 @@ define <32 x i16> @test137(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test137:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test137:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6451,7 +6451,7 @@ entry:
 
 define <32 x i16> @test138(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test138:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pmaxsw %xmm4, %xmm0
 ; SSE-NEXT:    pmaxsw %xmm5, %xmm1
 ; SSE-NEXT:    pmaxsw %xmm6, %xmm2
@@ -6459,7 +6459,7 @@ define <32 x i16> @test138(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test138:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsw %xmm4, %xmm5, %xmm4
@@ -6473,13 +6473,13 @@ define <32 x i16> @test138(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test138:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test138:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6490,7 +6490,7 @@ entry:
 
 define <32 x i16> @test139(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test139:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm4, %xmm0
 ; SSE-NEXT:    pminsw %xmm5, %xmm1
 ; SSE-NEXT:    pminsw %xmm6, %xmm2
@@ -6498,7 +6498,7 @@ define <32 x i16> @test139(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test139:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsw %xmm4, %xmm5, %xmm4
@@ -6512,13 +6512,13 @@ define <32 x i16> @test139(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test139:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test139:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6529,7 +6529,7 @@ entry:
 
 define <32 x i16> @test140(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-LABEL: test140:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pminsw %xmm4, %xmm0
 ; SSE-NEXT:    pminsw %xmm5, %xmm1
 ; SSE-NEXT:    pminsw %xmm6, %xmm2
@@ -6537,7 +6537,7 @@ define <32 x i16> @test140(<32 x i16> %a
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test140:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsw %xmm4, %xmm5, %xmm4
@@ -6551,13 +6551,13 @@ define <32 x i16> @test140(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test140:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test140:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6568,7 +6568,7 @@ entry:
 
 define <32 x i16> @test141(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test141:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm11
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm9
@@ -6608,7 +6608,7 @@ define <32 x i16> @test141(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test141:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxuw %xmm6, %xmm2
@@ -6616,7 +6616,7 @@ define <32 x i16> @test141(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test141:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxuw %xmm4, %xmm5, %xmm4
@@ -6630,13 +6630,13 @@ define <32 x i16> @test141(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test141:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test141:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6647,7 +6647,7 @@ entry:
 
 define <32 x i16> @test142(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test142:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm1, %xmm10
@@ -6677,7 +6677,7 @@ define <32 x i16> @test142(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test142:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxuw %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxuw %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxuw %xmm6, %xmm2
@@ -6685,7 +6685,7 @@ define <32 x i16> @test142(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test142:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxuw %xmm4, %xmm5, %xmm4
@@ -6699,13 +6699,13 @@ define <32 x i16> @test142(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test142:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test142:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6716,7 +6716,7 @@ entry:
 
 define <32 x i16> @test143(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test143:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm11
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm9
@@ -6756,7 +6756,7 @@ define <32 x i16> @test143(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test143:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm4, %xmm0
 ; SSE4-NEXT:    pminuw %xmm5, %xmm1
 ; SSE4-NEXT:    pminuw %xmm6, %xmm2
@@ -6764,7 +6764,7 @@ define <32 x i16> @test143(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test143:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
@@ -6778,13 +6778,13 @@ define <32 x i16> @test143(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test143:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test143:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6795,7 +6795,7 @@ entry:
 
 define <32 x i16> @test144(<32 x i16> %a, <32 x i16> %b) {
 ; SSE2-LABEL: test144:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
 ; SSE2-NEXT:    movdqa %xmm1, %xmm10
@@ -6828,7 +6828,7 @@ define <32 x i16> @test144(<32 x i16> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test144:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminuw %xmm4, %xmm0
 ; SSE4-NEXT:    pminuw %xmm5, %xmm1
 ; SSE4-NEXT:    pminuw %xmm6, %xmm2
@@ -6836,7 +6836,7 @@ define <32 x i16> @test144(<32 x i16> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test144:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
@@ -6850,13 +6850,13 @@ define <32 x i16> @test144(<32 x i16> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test144:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test144:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -6867,7 +6867,7 @@ entry:
 
 define <16 x i32> @test145(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test145:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    movdqa %xmm7, %xmm3
 ; SSE2-NEXT:    pcmpgtd %xmm8, %xmm3
@@ -6895,7 +6895,7 @@ define <16 x i32> @test145(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test145:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsd %xmm6, %xmm2
@@ -6903,7 +6903,7 @@ define <16 x i32> @test145(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test145:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsd %xmm4, %xmm5, %xmm4
@@ -6917,13 +6917,13 @@ define <16 x i32> @test145(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test145:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test145:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -6934,7 +6934,7 @@ entry:
 
 define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test146:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm2, %xmm8
 ; SSE2-NEXT:    movdqa %xmm3, %xmm12
 ; SSE2-NEXT:    pcmpgtd %xmm7, %xmm12
@@ -6970,7 +6970,7 @@ define <16 x i32> @test146(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test146:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxsd %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxsd %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxsd %xmm6, %xmm2
@@ -6978,7 +6978,7 @@ define <16 x i32> @test146(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test146:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxsd %xmm4, %xmm5, %xmm4
@@ -6992,13 +6992,13 @@ define <16 x i32> @test146(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test146:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test146:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7009,7 +7009,7 @@ entry:
 
 define <16 x i32> @test147(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test147:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pcmpgtd %xmm7, %xmm3
 ; SSE2-NEXT:    movdqa %xmm2, %xmm9
@@ -7036,7 +7036,7 @@ define <16 x i32> @test147(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test147:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm4, %xmm0
 ; SSE4-NEXT:    pminsd %xmm5, %xmm1
 ; SSE4-NEXT:    pminsd %xmm6, %xmm2
@@ -7044,7 +7044,7 @@ define <16 x i32> @test147(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test147:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsd %xmm4, %xmm5, %xmm4
@@ -7058,13 +7058,13 @@ define <16 x i32> @test147(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test147:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test147:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7075,7 +7075,7 @@ entry:
 
 define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test148:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm2, %xmm8
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    movdqa %xmm7, %xmm12
@@ -7111,7 +7111,7 @@ define <16 x i32> @test148(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test148:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminsd %xmm4, %xmm0
 ; SSE4-NEXT:    pminsd %xmm5, %xmm1
 ; SSE4-NEXT:    pminsd %xmm6, %xmm2
@@ -7119,7 +7119,7 @@ define <16 x i32> @test148(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test148:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminsd %xmm4, %xmm5, %xmm4
@@ -7133,13 +7133,13 @@ define <16 x i32> @test148(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test148:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminsd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminsd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test148:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7150,7 +7150,7 @@ entry:
 
 define <16 x i32> @test149(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test149:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm11
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm9
@@ -7190,7 +7190,7 @@ define <16 x i32> @test149(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test149:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxud %xmm6, %xmm2
@@ -7198,7 +7198,7 @@ define <16 x i32> @test149(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test149:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxud %xmm4, %xmm5, %xmm4
@@ -7212,13 +7212,13 @@ define <16 x i32> @test149(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test149:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test149:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7229,7 +7229,7 @@ entry:
 
 define <16 x i32> @test150(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test150:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm0
@@ -7277,7 +7277,7 @@ define <16 x i32> @test150(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test150:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pmaxud %xmm4, %xmm0
 ; SSE4-NEXT:    pmaxud %xmm5, %xmm1
 ; SSE4-NEXT:    pmaxud %xmm6, %xmm2
@@ -7285,7 +7285,7 @@ define <16 x i32> @test150(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test150:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpmaxud %xmm4, %xmm5, %xmm4
@@ -7299,13 +7299,13 @@ define <16 x i32> @test150(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test150:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpmaxud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaxud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test150:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7316,7 +7316,7 @@ entry:
 
 define <16 x i32> @test151(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test151:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm11
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm9
@@ -7356,7 +7356,7 @@ define <16 x i32> @test151(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test151:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm4, %xmm0
 ; SSE4-NEXT:    pminud %xmm5, %xmm1
 ; SSE4-NEXT:    pminud %xmm6, %xmm2
@@ -7364,7 +7364,7 @@ define <16 x i32> @test151(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test151:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminud %xmm4, %xmm5, %xmm4
@@ -7378,13 +7378,13 @@ define <16 x i32> @test151(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test151:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test151:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7395,7 +7395,7 @@ entry:
 
 define <16 x i32> @test152(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-LABEL: test152:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm14 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm0
@@ -7443,7 +7443,7 @@ define <16 x i32> @test152(<16 x i32> %a
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test152:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    pminud %xmm4, %xmm0
 ; SSE4-NEXT:    pminud %xmm5, %xmm1
 ; SSE4-NEXT:    pminud %xmm6, %xmm2
@@ -7451,7 +7451,7 @@ define <16 x i32> @test152(<16 x i32> %a
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test152:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpminud %xmm4, %xmm5, %xmm4
@@ -7465,13 +7465,13 @@ define <16 x i32> @test152(<16 x i32> %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test152:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpminud %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpminud %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test152:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminud %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7484,7 +7484,7 @@ entry:
 
 define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test153:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pxor %xmm11, %xmm8
@@ -7552,7 +7552,7 @@ define <8 x i64> @test153(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test153:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm7, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm9
@@ -7573,7 +7573,7 @@ define <8 x i64> @test153(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test153:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -7589,7 +7589,7 @@ define <8 x i64> @test153(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test153:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm3, %ymm4
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm2, %ymm5
 ; AVX2-NEXT:    vblendvpd %ymm5, %ymm2, %ymm0, %ymm0
@@ -7597,7 +7597,7 @@ define <8 x i64> @test153(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test153:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7608,7 +7608,7 @@ entry:
 
 define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test154:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -7689,7 +7689,7 @@ define <8 x i64> @test154(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test154:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm3, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm7, %xmm9
@@ -7714,7 +7714,7 @@ define <8 x i64> @test154(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test154:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -7735,7 +7735,7 @@ define <8 x i64> @test154(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test154:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm1, %ymm4
 ; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
 ; AVX2-NEXT:    vpxor %ymm5, %ymm4, %ymm4
@@ -7746,7 +7746,7 @@ define <8 x i64> @test154(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test154:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7757,7 +7757,7 @@ entry:
 
 define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test155:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    pxor %xmm11, %xmm8
@@ -7825,7 +7825,7 @@ define <8 x i64> @test155(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test155:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm3, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm7, %xmm9
@@ -7845,7 +7845,7 @@ define <8 x i64> @test155(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test155:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -7861,7 +7861,7 @@ define <8 x i64> @test155(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test155:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm1, %ymm4
 ; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm5
 ; AVX2-NEXT:    vblendvpd %ymm5, %ymm2, %ymm0, %ymm0
@@ -7869,7 +7869,7 @@ define <8 x i64> @test155(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test155:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -7880,7 +7880,7 @@ entry:
 
 define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test156:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm11
 ; SSE2-NEXT:    movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -7962,7 +7962,7 @@ define <8 x i64> @test156(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test156:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa %xmm7, %xmm9
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm9
@@ -7988,7 +7988,7 @@ define <8 x i64> @test156(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test156:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -8009,7 +8009,7 @@ define <8 x i64> @test156(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test156:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm3, %ymm4
 ; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
 ; AVX2-NEXT:    vpxor %ymm5, %ymm4, %ymm4
@@ -8020,7 +8020,7 @@ define <8 x i64> @test156(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test156:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -8031,7 +8031,7 @@ entry:
 
 define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test157:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm8
 ; SSE2-NEXT:    pxor %xmm11, %xmm8
@@ -8099,7 +8099,7 @@ define <8 x i64> @test157(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test157:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm10
@@ -8132,7 +8132,7 @@ define <8 x i64> @test157(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test157:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -8157,7 +8157,7 @@ define <8 x i64> @test157(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test157:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm6
@@ -8170,7 +8170,7 @@ define <8 x i64> @test157(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test157:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -8181,7 +8181,7 @@ entry:
 
 define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test158:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -8262,7 +8262,7 @@ define <8 x i64> @test158(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test158:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm7, %xmm10
@@ -8300,7 +8300,7 @@ define <8 x i64> @test158(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test158:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -8330,7 +8330,7 @@ define <8 x i64> @test158(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test158:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm6
@@ -8346,7 +8346,7 @@ define <8 x i64> @test158(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test158:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -8357,7 +8357,7 @@ entry:
 
 define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test159:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm7, %xmm8
 ; SSE2-NEXT:    pxor %xmm11, %xmm8
@@ -8425,7 +8425,7 @@ define <8 x i64> @test159(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test159:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm7, %xmm10
@@ -8458,7 +8458,7 @@ define <8 x i64> @test159(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test159:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -8483,7 +8483,7 @@ define <8 x i64> @test159(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test159:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm6
@@ -8496,7 +8496,7 @@ define <8 x i64> @test159(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test159:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -8507,7 +8507,7 @@ entry:
 
 define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test160:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa %xmm7, %xmm11
 ; SSE2-NEXT:    movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT:    movdqa %xmm3, %xmm7
@@ -8589,7 +8589,7 @@ define <8 x i64> @test160(<8 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test160:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm8
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm10
@@ -8627,7 +8627,7 @@ define <8 x i64> @test160(<8 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test160:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
@@ -8657,7 +8657,7 @@ define <8 x i64> @test160(<8 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test160:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
 ; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm5
 ; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm6
@@ -8673,7 +8673,7 @@ define <8 x i64> @test160(<8 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test160:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 entry:
@@ -8684,7 +8684,7 @@ entry:
 
 define <4 x i64> @test161(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test161:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm5
 ; SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -8718,7 +8718,7 @@ define <4 x i64> @test161(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test161:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm3, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm5
@@ -8732,7 +8732,7 @@ define <4 x i64> @test161(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test161:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8742,13 +8742,13 @@ define <4 x i64> @test161(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test161:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test161:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -8759,7 +8759,7 @@ entry:
 
 define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test162:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -8799,7 +8799,7 @@ define <4 x i64> @test162(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test162:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm1, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm5
@@ -8815,7 +8815,7 @@ define <4 x i64> @test162(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test162:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8828,7 +8828,7 @@ define <4 x i64> @test162(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test162:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
 ; AVX2-NEXT:    vpxor %ymm3, %ymm2, %ymm2
@@ -8836,7 +8836,7 @@ define <4 x i64> @test162(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test162:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -8847,7 +8847,7 @@ entry:
 
 define <4 x i64> @test163(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test163:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -8881,7 +8881,7 @@ define <4 x i64> @test163(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test163:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm1, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm5
@@ -8894,7 +8894,7 @@ define <4 x i64> @test163(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test163:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8904,13 +8904,13 @@ define <4 x i64> @test163(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test163:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test163:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -8921,7 +8921,7 @@ entry:
 
 define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test164:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -8961,7 +8961,7 @@ define <4 x i64> @test164(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test164:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm3, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm5
@@ -8978,7 +8978,7 @@ define <4 x i64> @test164(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test164:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -8991,7 +8991,7 @@ define <4 x i64> @test164(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test164:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
 ; AVX2-NEXT:    vpxor %ymm3, %ymm2, %ymm2
@@ -8999,7 +8999,7 @@ define <4 x i64> @test164(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test164:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9010,7 +9010,7 @@ entry:
 
 define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test165:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm5
 ; SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -9044,7 +9044,7 @@ define <4 x i64> @test165(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test165:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm6
@@ -9064,7 +9064,7 @@ define <4 x i64> @test165(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test165:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9079,7 +9079,7 @@ define <4 x i64> @test165(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test165:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm2
@@ -9088,7 +9088,7 @@ define <4 x i64> @test165(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test165:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9099,7 +9099,7 @@ entry:
 
 define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test166:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -9139,7 +9139,7 @@ define <4 x i64> @test166(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test166:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm6
@@ -9162,7 +9162,7 @@ define <4 x i64> @test166(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test166:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9180,7 +9180,7 @@ define <4 x i64> @test166(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test166:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
@@ -9191,7 +9191,7 @@ define <4 x i64> @test166(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test166:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9202,7 +9202,7 @@ entry:
 
 define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test167:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -9236,7 +9236,7 @@ define <4 x i64> @test167(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test167:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm6
@@ -9256,7 +9256,7 @@ define <4 x i64> @test167(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test167:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9271,7 +9271,7 @@ define <4 x i64> @test167(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test167:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
@@ -9280,7 +9280,7 @@ define <4 x i64> @test167(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test167:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9291,7 +9291,7 @@ entry:
 
 define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test168:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -9331,7 +9331,7 @@ define <4 x i64> @test168(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test168:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm6
@@ -9354,7 +9354,7 @@ define <4 x i64> @test168(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test168:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9372,7 +9372,7 @@ define <4 x i64> @test168(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test168:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm2
@@ -9383,7 +9383,7 @@ define <4 x i64> @test168(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test168:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9394,7 +9394,7 @@ entry:
 
 define <4 x i64> @test169(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test169:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm5, %xmm4
@@ -9430,7 +9430,7 @@ define <4 x i64> @test169(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test169:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm3, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm5
@@ -9443,7 +9443,7 @@ define <4 x i64> @test169(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test169:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9453,13 +9453,13 @@ define <4 x i64> @test169(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test169:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test169:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9470,7 +9470,7 @@ entry:
 
 define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test170:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -9510,7 +9510,7 @@ define <4 x i64> @test170(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test170:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm1, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm5
@@ -9525,7 +9525,7 @@ define <4 x i64> @test170(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test170:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9538,7 +9538,7 @@ define <4 x i64> @test170(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test170:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
 ; AVX2-NEXT:    vpxor %ymm3, %ymm2, %ymm2
@@ -9546,7 +9546,7 @@ define <4 x i64> @test170(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test170:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9557,7 +9557,7 @@ entry:
 
 define <4 x i64> @test171(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test171:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm5, %xmm4
@@ -9593,7 +9593,7 @@ define <4 x i64> @test171(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test171:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm1, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm5
@@ -9605,7 +9605,7 @@ define <4 x i64> @test171(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test171:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9615,13 +9615,13 @@ define <4 x i64> @test171(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test171:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test171:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9632,7 +9632,7 @@ entry:
 
 define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test172:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -9672,7 +9672,7 @@ define <4 x i64> @test172(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test172:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa %xmm3, %xmm5
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm5
@@ -9688,7 +9688,7 @@ define <4 x i64> @test172(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test172:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -9701,7 +9701,7 @@ define <4 x i64> @test172(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test172:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm3, %ymm3
 ; AVX2-NEXT:    vpxor %ymm3, %ymm2, %ymm2
@@ -9709,7 +9709,7 @@ define <4 x i64> @test172(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test172:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9720,7 +9720,7 @@ entry:
 
 define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test173:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm5, %xmm4
@@ -9756,7 +9756,7 @@ define <4 x i64> @test173(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test173:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm6
@@ -9775,7 +9775,7 @@ define <4 x i64> @test173(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test173:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9790,7 +9790,7 @@ define <4 x i64> @test173(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test173:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm2
@@ -9799,7 +9799,7 @@ define <4 x i64> @test173(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test173:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9810,7 +9810,7 @@ entry:
 
 define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test174:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -9850,7 +9850,7 @@ define <4 x i64> @test174(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test174:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm6
@@ -9872,7 +9872,7 @@ define <4 x i64> @test174(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test174:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9890,7 +9890,7 @@ define <4 x i64> @test174(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test174:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
@@ -9901,7 +9901,7 @@ define <4 x i64> @test174(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test174:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -9912,7 +9912,7 @@ entry:
 
 define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test175:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    pxor %xmm5, %xmm4
@@ -9948,7 +9948,7 @@ define <4 x i64> @test175(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test175:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm3, %xmm6
@@ -9967,7 +9967,7 @@ define <4 x i64> @test175(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test175:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -9982,7 +9982,7 @@ define <4 x i64> @test175(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test175:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
@@ -9991,7 +9991,7 @@ define <4 x i64> @test175(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test175:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10002,7 +10002,7 @@ entry:
 
 define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: test176:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm4
 ; SSE2-NEXT:    pxor %xmm7, %xmm4
@@ -10042,7 +10042,7 @@ define <4 x i64> @test176(<4 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test176:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm6
@@ -10064,7 +10064,7 @@ define <4 x i64> @test176(<4 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test176:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10082,7 +10082,7 @@ define <4 x i64> @test176(<4 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test176:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
 ; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
 ; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm2
@@ -10093,7 +10093,7 @@ define <4 x i64> @test176(<4 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test176:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10104,7 +10104,7 @@ entry:
 
 define <2 x i64> @test177(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test177:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10123,7 +10123,7 @@ define <2 x i64> @test177(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test177:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa %xmm1, %xmm0
 ; SSE4-NEXT:    pcmpgtq %xmm2, %xmm0
@@ -10132,19 +10132,19 @@ define <2 x i64> @test177(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test177:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test177:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test177:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10155,7 +10155,7 @@ entry:
 
 define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test178:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10177,7 +10177,7 @@ define <2 x i64> @test178(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test178:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm0
 ; SSE4-NEXT:    pcmpeqd %xmm3, %xmm3
@@ -10187,7 +10187,7 @@ define <2 x i64> @test178(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test178:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10195,7 +10195,7 @@ define <2 x i64> @test178(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test178:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10203,7 +10203,7 @@ define <2 x i64> @test178(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test178:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10214,7 +10214,7 @@ entry:
 
 define <2 x i64> @test179(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test179:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10233,7 +10233,7 @@ define <2 x i64> @test179(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test179:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm0
 ; SSE4-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
@@ -10241,19 +10241,19 @@ define <2 x i64> @test179(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test179:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test179:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test179:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10264,7 +10264,7 @@ entry:
 
 define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test180:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10286,7 +10286,7 @@ define <2 x i64> @test180(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test180:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa %xmm1, %xmm3
 ; SSE4-NEXT:    pcmpgtq %xmm2, %xmm3
@@ -10297,7 +10297,7 @@ define <2 x i64> @test180(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test180:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10305,7 +10305,7 @@ define <2 x i64> @test180(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test180:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10313,7 +10313,7 @@ define <2 x i64> @test180(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test180:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10324,7 +10324,7 @@ entry:
 
 define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test181:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10343,7 +10343,7 @@ define <2 x i64> @test181(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test181:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm2, %xmm3
@@ -10355,7 +10355,7 @@ define <2 x i64> @test181(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test181:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -10364,7 +10364,7 @@ define <2 x i64> @test181(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test181:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -10373,7 +10373,7 @@ define <2 x i64> @test181(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test181:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10384,7 +10384,7 @@ entry:
 
 define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test182:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10406,7 +10406,7 @@ define <2 x i64> @test182(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test182:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm0
@@ -10420,7 +10420,7 @@ define <2 x i64> @test182(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test182:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10431,7 +10431,7 @@ define <2 x i64> @test182(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test182:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10442,7 +10442,7 @@ define <2 x i64> @test182(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test182:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10453,7 +10453,7 @@ entry:
 
 define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test183:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10472,7 +10472,7 @@ define <2 x i64> @test183(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test183:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm3
@@ -10484,7 +10484,7 @@ define <2 x i64> @test183(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test183:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10493,7 +10493,7 @@ define <2 x i64> @test183(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test183:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10502,7 +10502,7 @@ define <2 x i64> @test183(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test183:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10513,7 +10513,7 @@ entry:
 
 define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test184:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10535,7 +10535,7 @@ define <2 x i64> @test184(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test184:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    pxor %xmm3, %xmm0
@@ -10548,7 +10548,7 @@ define <2 x i64> @test184(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test184:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -10559,7 +10559,7 @@ define <2 x i64> @test184(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test184:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -10570,7 +10570,7 @@ define <2 x i64> @test184(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test184:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10581,7 +10581,7 @@ entry:
 
 define <2 x i64> @test185(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test185:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10601,7 +10601,7 @@ define <2 x i64> @test185(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test185:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa %xmm1, %xmm0
 ; SSE4-NEXT:    pcmpgtq %xmm2, %xmm0
@@ -10610,19 +10610,19 @@ define <2 x i64> @test185(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test185:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test185:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test185:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10633,7 +10633,7 @@ entry:
 
 define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test186:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10655,7 +10655,7 @@ define <2 x i64> @test186(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test186:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm0
 ; SSE4-NEXT:    pcmpeqd %xmm3, %xmm3
@@ -10665,7 +10665,7 @@ define <2 x i64> @test186(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test186:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10673,7 +10673,7 @@ define <2 x i64> @test186(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test186:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10681,7 +10681,7 @@ define <2 x i64> @test186(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test186:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10692,7 +10692,7 @@ entry:
 
 define <2 x i64> @test187(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test187:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10712,7 +10712,7 @@ define <2 x i64> @test187(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test187:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    pcmpgtq %xmm1, %xmm0
 ; SSE4-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
@@ -10720,19 +10720,19 @@ define <2 x i64> @test187(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test187:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test187:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test187:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10743,7 +10743,7 @@ entry:
 
 define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test188:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10765,7 +10765,7 @@ define <2 x i64> @test188(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test188:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa %xmm1, %xmm3
 ; SSE4-NEXT:    pcmpgtq %xmm2, %xmm3
@@ -10776,7 +10776,7 @@ define <2 x i64> @test188(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test188:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10784,7 +10784,7 @@ define <2 x i64> @test188(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test188:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -10792,7 +10792,7 @@ define <2 x i64> @test188(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test188:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminsq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10803,7 +10803,7 @@ entry:
 
 define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test189:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10823,7 +10823,7 @@ define <2 x i64> @test189(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test189:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm2, %xmm3
@@ -10835,7 +10835,7 @@ define <2 x i64> @test189(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test189:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -10844,7 +10844,7 @@ define <2 x i64> @test189(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test189:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -10853,7 +10853,7 @@ define <2 x i64> @test189(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test189:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10864,7 +10864,7 @@ entry:
 
 define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test190:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10886,7 +10886,7 @@ define <2 x i64> @test190(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test190:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm0
@@ -10900,7 +10900,7 @@ define <2 x i64> @test190(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test190:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10911,7 +10911,7 @@ define <2 x i64> @test190(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test190:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10922,7 +10922,7 @@ define <2 x i64> @test190(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test190:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10933,7 +10933,7 @@ entry:
 
 define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test191:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -10953,7 +10953,7 @@ define <2 x i64> @test191(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test191:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    movdqa %xmm1, %xmm3
@@ -10965,7 +10965,7 @@ define <2 x i64> @test191(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test191:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10974,7 +10974,7 @@ define <2 x i64> @test191(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test191:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm2
@@ -10983,7 +10983,7 @@ define <2 x i64> @test191(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test191:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -10994,7 +10994,7 @@ entry:
 
 define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test192:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -11016,7 +11016,7 @@ define <2 x i64> @test192(<2 x i64> %a,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: test192:
-; SSE4:       # BB#0: # %entry
+; SSE4:       # %bb.0: # %entry
 ; SSE4-NEXT:    movdqa %xmm0, %xmm2
 ; SSE4-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; SSE4-NEXT:    pxor %xmm3, %xmm0
@@ -11029,7 +11029,7 @@ define <2 x i64> @test192(<2 x i64> %a,
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: test192:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -11040,7 +11040,7 @@ define <2 x i64> @test192(<2 x i64> %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test192:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
 ; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
@@ -11051,7 +11051,7 @@ define <2 x i64> @test192(<2 x i64> %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test192:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    retq
 entry:

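For reference, every hunk above exercises the same shape: a 64-bit vector
min/max written as an icmp-plus-select, whose autogenerated check lines now
spell the entry-block comment as "# %bb.0:". A minimal self-contained sketch
in that style (the function name, RUN line, and -mattr features here are
illustrative assumptions, not taken from the test file; the check lines are
modeled on the single-instruction AVX512 outputs shown above):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s

; Unsigned 128-bit i64 minimum: icmp ult + select should lower to a single
; vpminuq when AVX-512VL is available, matching the vpminuq check lines above.
define <2 x i64> @umin_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: umin_v2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
entry:
  %cmp = icmp ult <2 x i64> %a, %b
  %sel = select <2 x i1> %cmp, <2 x i64> %a, <2 x i64> %b
  ret <2 x i64> %sel
}
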
Modified: llvm/trunk/test/CodeGen/X86/vselect-packss.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-packss.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-packss.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-packss.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@
 
 define <16 x i8> @vselect_packss_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2, <16 x i8> %a3) {
 ; SSE2-LABEL: vselect_packss_v16i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pcmpeqw %xmm3, %xmm1
 ; SSE2-NEXT:    pcmpeqw %xmm2, %xmm0
 ; SSE2-NEXT:    packsswb %xmm1, %xmm0
@@ -24,7 +24,7 @@ define <16 x i8> @vselect_packss_v16i16(
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: vselect_packss_v16i16:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pcmpeqw %xmm3, %xmm1
 ; SSE42-NEXT:    pcmpeqw %xmm2, %xmm0
 ; SSE42-NEXT:    packsswb %xmm1, %xmm0
@@ -33,7 +33,7 @@ define <16 x i8> @vselect_packss_v16i16(
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: vselect_packss_v16i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm5, %xmm4
@@ -44,7 +44,7 @@ define <16 x i8> @vselect_packss_v16i16(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vselect_packss_v16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -53,7 +53,7 @@ define <16 x i8> @vselect_packss_v16i16(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512NOBW-LABEL: vselect_packss_v16i16:
-; AVX512NOBW:       # BB#0:
+; AVX512NOBW:       # %bb.0:
 ; AVX512NOBW-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
 ; AVX512NOBW-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512NOBW-NEXT:    vpmovdb %zmm0, %xmm0
@@ -62,7 +62,7 @@ define <16 x i8> @vselect_packss_v16i16(
 ; AVX512NOBW-NEXT:    retq
 ;
 ; AVX512BWNOVL-LABEL: vselect_packss_v16i16:
-; AVX512BWNOVL:       # BB#0:
+; AVX512BWNOVL:       # %bb.0:
 ; AVX512BWNOVL-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
 ; AVX512BWNOVL-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BWNOVL-NEXT:    vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -70,7 +70,7 @@ define <16 x i8> @vselect_packss_v16i16(
 ; AVX512BWNOVL-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: vselect_packss_v16i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpcmpeqw %ymm1, %ymm0, %k0
 ; AVX512BWVL-NEXT:    vpmovm2b %k0, %xmm0
 ; AVX512BWVL-NEXT:    vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -87,7 +87,7 @@ define <16 x i8> @vselect_packss_v16i16(
 
 define <16 x i8> @vselect_packss_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i8> %a2, <16 x i8> %a3) {
 ; SSE2-LABEL: vselect_packss_v16i32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pcmpeqd %xmm7, %xmm3
 ; SSE2-NEXT:    pcmpeqd %xmm6, %xmm2
 ; SSE2-NEXT:    packssdw %xmm3, %xmm2
@@ -102,7 +102,7 @@ define <16 x i8> @vselect_packss_v16i32(
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: vselect_packss_v16i32:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm8
 ; SSE42-NEXT:    pcmpeqd %xmm7, %xmm3
 ; SSE42-NEXT:    pcmpeqd %xmm6, %xmm2
@@ -116,7 +116,7 @@ define <16 x i8> @vselect_packss_v16i32(
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: vselect_packss_v16i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
 ; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm7, %xmm6
@@ -133,7 +133,7 @@ define <16 x i8> @vselect_packss_v16i32(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vselect_packss_v16i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
@@ -147,7 +147,7 @@ define <16 x i8> @vselect_packss_v16i32(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512NOBW-LABEL: vselect_packss_v16i32:
-; AVX512NOBW:       # BB#0:
+; AVX512NOBW:       # %bb.0:
 ; AVX512NOBW-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
 ; AVX512NOBW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512NOBW-NEXT:    vpmovdb %zmm0, %xmm0
@@ -156,7 +156,7 @@ define <16 x i8> @vselect_packss_v16i32(
 ; AVX512NOBW-NEXT:    retq
 ;
 ; AVX512BWNOVL-LABEL: vselect_packss_v16i32:
-; AVX512BWNOVL:       # BB#0:
+; AVX512BWNOVL:       # %bb.0:
 ; AVX512BWNOVL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512BWNOVL-NEXT:    vpmovm2b %k0, %zmm0
 ; AVX512BWNOVL-NEXT:    vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -164,7 +164,7 @@ define <16 x i8> @vselect_packss_v16i32(
 ; AVX512BWNOVL-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: vselect_packss_v16i32:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512BWVL-NEXT:    vpmovm2b %k0, %xmm0
 ; AVX512BWVL-NEXT:    vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
@@ -181,7 +181,7 @@ define <16 x i8> @vselect_packss_v16i32(
 
 define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8> %a2, <16 x i8> %a3) {
 ; SSE2-LABEL: vselect_packss_v16i64:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pcmpeqd {{[0-9]+}}(%rsp), %xmm7
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[1,0,3,2]
 ; SSE2-NEXT:    pand %xmm7, %xmm8
@@ -221,7 +221,7 @@ define <16 x i8> @vselect_packss_v16i64(
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: vselect_packss_v16i64:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pcmpeqq {{[0-9]+}}(%rsp), %xmm7
 ; SSE42-NEXT:    pcmpeqq {{[0-9]+}}(%rsp), %xmm6
 ; SSE42-NEXT:    packssdw %xmm7, %xmm6
@@ -244,7 +244,7 @@ define <16 x i8> @vselect_packss_v16i64(
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: vselect_packss_v16i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm8
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm9
 ; AVX1-NEXT:    vpcmpeqq %xmm8, %xmm9, %xmm8
@@ -275,7 +275,7 @@ define <16 x i8> @vselect_packss_v16i64(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vselect_packss_v16i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqq %ymm7, %ymm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqq %ymm6, %ymm2, %ymm2
 ; AVX2-NEXT:    vpackssdw %ymm3, %ymm2, %ymm2
@@ -295,7 +295,7 @@ define <16 x i8> @vselect_packss_v16i64(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512NOBW-LABEL: vselect_packss_v16i64:
-; AVX512NOBW:       # BB#0:
+; AVX512NOBW:       # %bb.0:
 ; AVX512NOBW-NEXT:    vpcmpeqq %zmm2, %zmm0, %k0
 ; AVX512NOBW-NEXT:    vpcmpeqq %zmm3, %zmm1, %k1
 ; AVX512NOBW-NEXT:    kunpckbw %k0, %k1, %k1
@@ -306,7 +306,7 @@ define <16 x i8> @vselect_packss_v16i64(
 ; AVX512NOBW-NEXT:    retq
 ;
 ; AVX512BWNOVL-LABEL: vselect_packss_v16i64:
-; AVX512BWNOVL:       # BB#0:
+; AVX512BWNOVL:       # %bb.0:
 ; AVX512BWNOVL-NEXT:    vpcmpeqq %zmm2, %zmm0, %k0
 ; AVX512BWNOVL-NEXT:    vpcmpeqq %zmm3, %zmm1, %k1
 ; AVX512BWNOVL-NEXT:    kunpckbw %k0, %k1, %k0
@@ -316,7 +316,7 @@ define <16 x i8> @vselect_packss_v16i64(
 ; AVX512BWNOVL-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: vselect_packss_v16i64:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpcmpeqq %zmm2, %zmm0, %k0
 ; AVX512BWVL-NEXT:    vpcmpeqq %zmm3, %zmm1, %k1
 ; AVX512BWVL-NEXT:    kunpckbw %k0, %k1, %k0
@@ -339,7 +339,7 @@ define <16 x i8> @vselect_packss_v16i64(
 
 define <16 x i8> @vselect_packss(<16 x i16> %a0, <16 x i16> %a1, <16 x i8> %a2, <16 x i8> %a3) {
 ; SSE2-LABEL: vselect_packss:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pcmpeqw %xmm3, %xmm1
 ; SSE2-NEXT:    pcmpeqw %xmm2, %xmm0
 ; SSE2-NEXT:    packsswb %xmm1, %xmm0
@@ -349,7 +349,7 @@ define <16 x i8> @vselect_packss(<16 x i
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: vselect_packss:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pcmpeqw %xmm3, %xmm1
 ; SSE42-NEXT:    pcmpeqw %xmm2, %xmm0
 ; SSE42-NEXT:    packsswb %xmm1, %xmm0
@@ -358,7 +358,7 @@ define <16 x i8> @vselect_packss(<16 x i
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: vselect_packss:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm5, %xmm4
@@ -369,7 +369,7 @@ define <16 x i8> @vselect_packss(<16 x i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vselect_packss:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -378,7 +378,7 @@ define <16 x i8> @vselect_packss(<16 x i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512NOBW-LABEL: vselect_packss:
-; AVX512NOBW:       # BB#0:
+; AVX512NOBW:       # %bb.0:
 ; AVX512NOBW-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
 ; AVX512NOBW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512NOBW-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -387,7 +387,7 @@ define <16 x i8> @vselect_packss(<16 x i
 ; AVX512NOBW-NEXT:    retq
 ;
 ; AVX512BWNOVL-LABEL: vselect_packss:
-; AVX512BWNOVL:       # BB#0:
+; AVX512BWNOVL:       # %bb.0:
 ; AVX512BWNOVL-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
 ; AVX512BWNOVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWNOVL-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -396,7 +396,7 @@ define <16 x i8> @vselect_packss(<16 x i
 ; AVX512BWNOVL-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: vselect_packss:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpcmpeqw %ymm1, %ymm0, %k0
 ; AVX512BWVL-NEXT:    vpmovm2w %k0, %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll Mon Dec  4 09:18:51 2017
@@ -13,12 +13,12 @@
 
 define <16 x i8> @signbit_sel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) {
 ; AVX12-LABEL: signbit_sel_v16i8:
-; AVX12:       # BB#0:
+; AVX12:       # %bb.0:
 ; AVX12-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX12-NEXT:    retq
 ;
 ; AVX512-LABEL: signbit_sel_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm2
 ; AVX512-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
@@ -32,7 +32,7 @@ define <16 x i8> @signbit_sel_v16i8(<16
 
 define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) {
 ; AVX-LABEL: signbit_sel_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
 ; AVX-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
@@ -44,12 +44,12 @@ define <8 x i16> @signbit_sel_v8i16(<8 x
 
 define <4 x i32> @signbit_sel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
 ; AVX12F-LABEL: signbit_sel_v4i32:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v4i32:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
 ; AVX512VL-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
@@ -61,12 +61,12 @@ define <4 x i32> @signbit_sel_v4i32(<4 x
 
 define <2 x i64> @signbit_sel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
 ; AVX12F-LABEL: signbit_sel_v2i64:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v2i64:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
 ; AVX512VL-NEXT:    vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
@@ -78,12 +78,12 @@ define <2 x i64> @signbit_sel_v2i64(<2 x
 
 define <4 x float> @signbit_sel_v4f32(<4 x float> %x, <4 x float> %y, <4 x i32> %mask) {
 ; AVX12F-LABEL: signbit_sel_v4f32:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v4f32:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
 ; AVX512VL-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}
@@ -95,12 +95,12 @@ define <4 x float> @signbit_sel_v4f32(<4
 
 define <2 x double> @signbit_sel_v2f64(<2 x double> %x, <2 x double> %y, <2 x i64> %mask) {
 ; AVX12F-LABEL: signbit_sel_v2f64:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v2f64:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
 ; AVX512VL-NEXT:    vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
@@ -114,7 +114,7 @@ define <2 x double> @signbit_sel_v2f64(<
 
 define <32 x i8> @signbit_sel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %mask) {
 ; AVX1-LABEL: signbit_sel_v32i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm4, %xmm3
@@ -126,12 +126,12 @@ define <32 x i8> @signbit_sel_v32i8(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: signbit_sel_v32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: signbit_sel_v32i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm2
 ; AVX512-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -145,7 +145,7 @@ define <32 x i8> @signbit_sel_v32i8(<32
 
 define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %mask) {
 ; AVX1-LABEL: signbit_sel_v16i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm4, %xmm3
@@ -157,14 +157,14 @@ define <16 x i16> @signbit_sel_v16i16(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: signbit_sel_v16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: signbit_sel_v16i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
 ; AVX512-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -176,12 +176,12 @@ define <16 x i16> @signbit_sel_v16i16(<1
 
 define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
 ; AVX12-LABEL: signbit_sel_v8i32:
-; AVX12:       # BB#0:
+; AVX12:       # %bb.0:
 ; AVX12-NEXT:    vblendvps %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX12-NEXT:    retq
 ;
 ; AVX512F-LABEL: signbit_sel_v8i32:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
 ; AVX512F-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
@@ -192,7 +192,7 @@ define <8 x i32> @signbit_sel_v8i32(<8 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v8i32:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
 ; AVX512VL-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
@@ -204,12 +204,12 @@ define <8 x i32> @signbit_sel_v8i32(<8 x
 
 define <4 x i64> @signbit_sel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
 ; AVX12F-LABEL: signbit_sel_v4i64:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v4i64:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
 ; AVX512VL-NEXT:    vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
@@ -221,12 +221,12 @@ define <4 x i64> @signbit_sel_v4i64(<4 x
 
 define <4 x double> @signbit_sel_v4f64(<4 x double> %x, <4 x double> %y, <4 x i64> %mask) {
 ; AVX12F-LABEL: signbit_sel_v4f64:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v4f64:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
 ; AVX512VL-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
@@ -240,7 +240,7 @@ define <4 x double> @signbit_sel_v4f64(<
 
 define <4 x double> @signbit_sel_v4f64_small_mask(<4 x double> %x, <4 x double> %y, <4 x i32> %mask) {
 ; AVX1-LABEL: signbit_sel_v4f64_small_mask:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovsxdq %xmm2, %xmm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; AVX1-NEXT:    vpmovsxdq %xmm2, %xmm2
@@ -249,19 +249,19 @@ define <4 x double> @signbit_sel_v4f64_s
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: signbit_sel_v4f64_small_mask:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovsxdq %xmm2, %ymm2
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: signbit_sel_v4f64_small_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxdq %xmm2, %ymm2
 ; AVX512F-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v4f64_small_mask:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
 ; AVX512VL-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
@@ -275,13 +275,13 @@ define <4 x double> @signbit_sel_v4f64_s
 
 define <8 x double> @signbit_sel_v8f64(<8 x double> %x, <8 x double> %y, <8 x i64> %mask) {
 ; AVX12-LABEL: signbit_sel_v8f64:
-; AVX12:       # BB#0:
+; AVX12:       # %bb.0:
 ; AVX12-NEXT:    vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
 ; AVX12-NEXT:    vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
 ; AVX12-NEXT:    retq
 ;
 ; AVX512-LABEL: signbit_sel_v8f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX512-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
 ; AVX512-NEXT:    vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
@@ -297,14 +297,14 @@ define <8 x double> @signbit_sel_v8f64(<
 
 define <4 x float> @signbit_sel_v4f32_fcmp(<4 x float> %x, <4 x float> %y, <4 x float> %mask) #0 {
 ; AVX12F-LABEL: signbit_sel_v4f32_fcmp:
-; AVX12F:       # BB#0:
+; AVX12F:       # %bb.0:
 ; AVX12F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; AVX12F-NEXT:    vcmpltps %xmm2, %xmm0, %xmm2
 ; AVX12F-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX12F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: signbit_sel_v4f32_fcmp:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vcmpltps %xmm2, %xmm0, %k1
 ; AVX512VL-NEXT:    vblendmps %xmm0, %xmm1, %xmm0 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/vselect-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-zero.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-zero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-zero.ll Mon Dec  4 09:18:51 2017
@@ -8,14 +8,14 @@
 
 define <4 x i32> @test1(<4 x i1> %cond, <4 x i32> %x) {
 ; SSE-LABEL: test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
@@ -26,13 +26,13 @@ define <4 x i32> @test1(<4 x i1> %cond,
 
 define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %x) {
 ; SSE-LABEL: test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqps %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -43,13 +43,13 @@ define <4 x i32> @test2(<4 x float> %a,
 
 define float @fsel(float %a, float %b, float %x) {
 ; SSE-LABEL: fsel:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqss %xmm1, %xmm0
 ; SSE-NEXT:    andnps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: fsel:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vandnps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect.ll Mon Dec  4 09:18:51 2017
@@ -9,18 +9,18 @@
 
 define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: test1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %a, <4 x float> %b
@@ -29,18 +29,18 @@ define <4 x float> @test1(<4 x float> %a
 
 define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: test2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test2:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -49,17 +49,17 @@ define <4 x float> @test2(<4 x float> %a
 
 define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: test3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
@@ -68,12 +68,12 @@ define <4 x float> @test3(<4 x float> %a
 
 define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -82,11 +82,11 @@ define <4 x float> @test4(<4 x float> %a
 
 define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
   ret <4 x float> %1
@@ -94,11 +94,11 @@ define <4 x float> @test5(<4 x float> %a
 
 define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> %a, <8 x i16> %a
   ret <8 x i16> %1
@@ -106,23 +106,23 @@ define <8 x i16> @test6(<8 x i16> %a, <8
 
 define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test7:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test7:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX2-NEXT:    retq
   %1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
@@ -131,22 +131,22 @@ define <8 x i16> @test7(<8 x i16> %a, <8
 
 define <8 x i16> @test8(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test8:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
   %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
@@ -155,12 +155,12 @@ define <8 x i16> @test8(<8 x i16> %a, <8
 
 define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test9:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test9:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
@@ -169,11 +169,11 @@ define <8 x i16> @test9(<8 x i16> %a, <8
 
 define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test10:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test10:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
   ret <8 x i16> %1
@@ -181,7 +181,7 @@ define <8 x i16> @test10(<8 x i16> %a, <
 
 define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
 ; SSE2-LABEL: test11:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andnps %xmm1, %xmm2
@@ -189,12 +189,12 @@ define <8 x i16> @test11(<8 x i16> %a, <
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test11:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test11:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 false, i1 true, i1 true, i1 false, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -203,12 +203,12 @@ define <8 x i16> @test11(<8 x i16> %a, <
 
 define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test12:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test12:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 false, i1 false, i1 undef, i1 false, i1 false, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -217,12 +217,12 @@ define <8 x i16> @test12(<8 x i16> %a, <
 
 define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test13:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test13:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -232,11 +232,11 @@ define <8 x i16> @test13(<8 x i16> %a, <
 ; Fold (vselect (build_vector AllOnes), N1, N2) -> N1
 define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test14:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test14:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 true, i1 undef, i1 true, i1 undef>, <4 x float> %a, <4 x float> %b
   ret <4 x float> %1
@@ -244,11 +244,11 @@ define <4 x float> @test14(<4 x float> %
 
 define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test15:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test15:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
   ret <8 x i16> %1
@@ -257,12 +257,12 @@ define <8 x i16> @test15(<8 x i16> %a, <
 ; Fold (vselect (build_vector AllZeros), N1, N2) -> N2
 define <4 x float> @test16(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 false, i1 undef, i1 false, i1 undef>, <4 x float> %a, <4 x float> %b
@@ -271,12 +271,12 @@ define <4 x float> @test16(<4 x float> %
 
 define <8 x i16> @test17(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: test17:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test17:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 undef, i1 undef, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
@@ -285,17 +285,17 @@ define <8 x i16> @test17(<8 x i16> %a, <
 
 define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: test18:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test18:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test18:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
@@ -304,22 +304,22 @@ define <4 x float> @test18(<4 x float> %
 
 define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test19:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test19:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test19:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test19:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX2-NEXT:    retq
   %1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> %a, <4 x i32> %b
@@ -328,17 +328,17 @@ define <4 x i32> @test19(<4 x i32> %a, <
 
 define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
 ; SSE2-LABEL: test20:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test20:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test20:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %b
@@ -347,22 +347,22 @@ define <2 x double> @test20(<2 x double>
 
 define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test21:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test21:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test21:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test21:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
   %1 = select <2 x i1> <i1 false, i1 true>, <2 x i64> %a, <2 x i64> %b
@@ -371,18 +371,18 @@ define <2 x i64> @test21(<2 x i64> %a, <
 
 define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: test22:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test22:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test22:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX-NEXT:    retq
   %1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -391,23 +391,23 @@ define <4 x float> @test22(<4 x float> %
 
 define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: test23:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test23:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test23:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test23:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX2-NEXT:    retq
   %1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %a, <4 x i32> %b
@@ -416,18 +416,18 @@ define <4 x i32> @test23(<4 x i32> %a, <
 
 define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
 ; SSE2-LABEL: test24:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test24:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test24:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    retq
   %1 = select <2 x i1> <i1 true, i1 false>, <2 x double> %a, <2 x double> %b
@@ -436,23 +436,23 @@ define <2 x double> @test24(<2 x double>
 
 define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
 ; SSE2-LABEL: test25:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test25:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test25:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test25:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX2-NEXT:    retq
   %1 = select <2 x i1> <i1 true, i1 false>, <2 x i64> %a, <2 x i64> %b
@@ -461,14 +461,14 @@ define <2 x i64> @test25(<2 x i64> %a, <
 
 define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x float> %a1, <2 x float> %b1) {
 ; SSE-LABEL: select_of_shuffles_0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; SSE-NEXT:    subps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_of_shuffles_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
@@ -486,7 +486,7 @@ define <4 x float> @select_of_shuffles_0
 ; PR20677
 define <16 x double> @select_illegal(<16 x double> %a, <16 x double> %b) {
 ; SSE-LABEL: select_illegal:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm4
 ; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm5
 ; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm6
@@ -503,7 +503,7 @@ define <16 x double> @select_illegal(<16
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_illegal:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %ymm6, %ymm2
 ; AVX-NEXT:    vmovaps %ymm7, %ymm3
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vshift-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vshift-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vshift-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vshift-1.ll Mon Dec  4 09:18:51 2017
@@ -7,14 +7,14 @@
 
 define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
 ; X32-LABEL: shift1a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psllq $32, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psllq $32, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -26,7 +26,7 @@ entry:
 
 define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
 ; X32-LABEL: shift1b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
@@ -35,7 +35,7 @@ define void @shift1b(<2 x i64> %val, <2
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsi, %xmm1
 ; X64-NEXT:    psllq %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -51,14 +51,14 @@ entry:
 
 define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
 ; X32-LABEL: shift2a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pslld $5, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pslld $5, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -70,7 +70,7 @@ entry:
 
 define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
 ; X32-LABEL: shift2b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    pslld %xmm1, %xmm0
@@ -78,7 +78,7 @@ define void @shift2b(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd %esi, %xmm1
 ; X64-NEXT:    pslld %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -95,14 +95,14 @@ entry:
 
 define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
 ; X32-LABEL: shift3a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psllw $5, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psllw $5, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -115,7 +115,7 @@ entry:
 ; Make sure the shift amount is properly zero extended.
 define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
 ; X32-LABEL: shift3b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movd %ecx, %xmm1
@@ -124,7 +124,7 @@ define void @shift3b(<8 x i16> %val, <8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    movd %eax, %xmm1
 ; X64-NEXT:    psllw %xmm1, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vshift-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vshift-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vshift-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vshift-2.ll Mon Dec  4 09:18:51 2017
@@ -7,14 +7,14 @@
 
 define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
 ; X32-LABEL: shift1a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psrlq $32, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrlq $32, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -26,7 +26,7 @@ entry:
 
 define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
 ; X32-LABEL: shift1b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
@@ -35,7 +35,7 @@ define void @shift1b(<2 x i64> %val, <2
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsi, %xmm1
 ; X64-NEXT:    psrlq %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -50,14 +50,14 @@ entry:
 
 define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
 ; X32-LABEL: shift2a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psrld $17, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrld $17, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -69,7 +69,7 @@ entry:
 
 define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
 ; X32-LABEL: shift2b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    psrld %xmm1, %xmm0
@@ -77,7 +77,7 @@ define void @shift2b(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd %esi, %xmm1
 ; X64-NEXT:    psrld %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -95,14 +95,14 @@ entry:
 
 define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
 ; X32-LABEL: shift3a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psrlw $5, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrlw $5, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -115,7 +115,7 @@ entry:
 ; properly zero extend the shift amount
 define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
 ; X32-LABEL: shift3b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movd %ecx, %xmm1
@@ -124,7 +124,7 @@ define void @shift3b(<8 x i16> %val, <8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    movd %eax, %xmm1
 ; X64-NEXT:    psrlw %xmm1, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vshift-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vshift-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vshift-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vshift-3.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
 ; X32-LABEL: shift1a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; X32-NEXT:    psrad $31, %xmm0
@@ -19,7 +19,7 @@ define void @shift1a(<2 x i64> %val, <2
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; X64-NEXT:    psrad $31, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -34,14 +34,14 @@ entry:
 
 define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
 ; X32-LABEL: shift2a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psrad $5, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrad $5, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -53,7 +53,7 @@ entry:
 
 define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
 ; X32-LABEL: shift2b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    psrad %xmm1, %xmm0
@@ -61,7 +61,7 @@ define void @shift2b(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd %esi, %xmm1
 ; X64-NEXT:    psrad %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -78,14 +78,14 @@ entry:
 
 define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
 ; X32-LABEL: shift3a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psraw $5, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psraw $5, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -97,7 +97,7 @@ entry:
 
 define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
 ; X32-LABEL: shift3b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movd %ecx, %xmm1
@@ -106,7 +106,7 @@ define void @shift3b(<8 x i16> %val, <8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    movd %eax, %xmm1
 ; X64-NEXT:    psraw %xmm1, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vshift-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vshift-4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vshift-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vshift-4.ll Mon Dec  4 09:18:51 2017
@@ -7,14 +7,14 @@
 
 define void @shift1a(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
 ; X32-LABEL: shift1a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    psllq %xmm1, %xmm0
 ; X32-NEXT:    movdqa %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psllq %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -28,7 +28,7 @@ entry:
 ; shift1b can't use a packed shift but can shift lanes separately and shuffle back together
 define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
 ; X32-LABEL: shift1b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movdqa %xmm0, %xmm2
 ; X32-NEXT:    psllq %xmm1, %xmm2
@@ -39,7 +39,7 @@ define void @shift1b(<2 x i64> %val, <2
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift1b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movdqa %xmm0, %xmm2
 ; X64-NEXT:    psllq %xmm1, %xmm2
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -56,7 +56,7 @@ entry:
 
 define void @shift2a(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
 ; X32-LABEL: shift2a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X32-NEXT:    xorps %xmm2, %xmm2
@@ -66,7 +66,7 @@ define void @shift2a(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-NEXT:    xorps %xmm2, %xmm2
 ; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -82,7 +82,7 @@ entry:
 
 define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
 ; X32-LABEL: shift2b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X32-NEXT:    xorps %xmm2, %xmm2
@@ -92,7 +92,7 @@ define void @shift2b(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-NEXT:    xorps %xmm2, %xmm2
 ; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -108,7 +108,7 @@ entry:
 
 define void @shift2c(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
 ; X32-LABEL: shift2c:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X32-NEXT:    xorps %xmm2, %xmm2
@@ -118,7 +118,7 @@ define void @shift2c(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift2c:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-NEXT:    xorps %xmm2, %xmm2
 ; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
@@ -134,7 +134,7 @@ entry:
 
 define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
 ; X32-LABEL: shift3a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pextrw $6, %xmm1, %ecx
 ; X32-NEXT:    movd %ecx, %xmm1
@@ -143,7 +143,7 @@ define void @shift3a(<8 x i16> %val, <8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pextrw $6, %xmm1, %eax
 ; X64-NEXT:    movd %eax, %xmm1
 ; X64-NEXT:    psllw %xmm1, %xmm0
@@ -158,7 +158,7 @@ entry:
 
 define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
 ; X32-LABEL: shift3b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movd %ecx, %xmm1
@@ -167,7 +167,7 @@ define void @shift3b(<8 x i16> %val, <8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift3b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl %si, %eax
 ; X64-NEXT:    movd %eax, %xmm1
 ; X64-NEXT:    psllw %xmm1, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vshift-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vshift-5.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vshift-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vshift-5.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @shift5a(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
 ; X32-LABEL: shift5a:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -15,7 +15,7 @@ define void @shift5a(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift5a:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X64-NEXT:    pslld %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -32,7 +32,7 @@ entry:
 
 define void @shift5b(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
 ; X32-LABEL: shift5b:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -41,7 +41,7 @@ define void @shift5b(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift5b:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X64-NEXT:    psrad %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -58,7 +58,7 @@ entry:
 
 define void @shift5c(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
 ; X32-LABEL: shift5c:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    pslld %xmm1, %xmm0
@@ -66,7 +66,7 @@ define void @shift5c(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift5c:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd %esi, %xmm1
 ; X64-NEXT:    pslld %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)
@@ -82,7 +82,7 @@ entry:
 
 define void @shift5d(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
 ; X32-LABEL: shift5d:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    psrad %xmm1, %xmm0
@@ -90,7 +90,7 @@ define void @shift5d(<4 x i32> %val, <4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: shift5d:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movd %esi, %xmm1
 ; X64-NEXT:    psrad %xmm1, %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rdi)

Modified: llvm/trunk/test/CodeGen/X86/vshift-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vshift-6.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vshift-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vshift-6.ll Mon Dec  4 09:18:51 2017
@@ -26,7 +26,7 @@
 
 define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
 ; X32-LABEL: do_not_crash:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movb %al, (%ecx)
@@ -63,7 +63,7 @@ define <16 x i8> @do_not_crash(i8*, i32*
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: do_not_crash:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movb %r9b, (%rdi)
 ; X64-NEXT:    movd %r9d, %xmm0
 ; X64-NEXT:    psllq $56, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vsplit-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vsplit-and.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vsplit-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vsplit-and.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly {
 ; CHECK-LABEL: t0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm2, %xmm2
 ; CHECK-NEXT:    pcmpeqq %xmm2, %xmm0
 ; CHECK-NEXT:    pcmpeqq %xmm2, %xmm1
@@ -22,7 +22,7 @@ define void @t0(<2 x i64>* %dst, <2 x i6
 
 define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind readonly {
 ; CHECK-LABEL: t2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %r9, %xmm1
 ; CHECK-NEXT:    movq %r8, %xmm0
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]

Modified: llvm/trunk/test/CodeGen/X86/vzero-excess.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vzero-excess.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vzero-excess.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vzero-excess.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <4 x float> @zeroupper_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
 ; CHECK-LABEL: zeroupper_v4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    subq $48, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
@@ -33,7 +33,7 @@ define <4 x float> @zeroupper_v4f32(<8 x
 
 define <8 x float> @zeroupper_v8f32(<8 x float> %x) nounwind {
 ; CHECK-LABEL: zeroupper_v8f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $56, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT:    vzeroupper
@@ -48,7 +48,7 @@ define <8 x float> @zeroupper_v8f32(<8 x
 
 define <4 x float> @zeroall_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
 ; CHECK-LABEL: zeroall_v4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    subq $48, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
@@ -75,7 +75,7 @@ define <4 x float> @zeroall_v4f32(<8 x f
 
 define <8 x float> @zeroall_v8f32(<8 x float> %x) nounwind {
 ; CHECK-LABEL: zeroall_v8f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $56, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT:    vzeroall

Modified: llvm/trunk/test/CodeGen/X86/wide-fma-contraction.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/wide-fma-contraction.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/wide-fma-contraction.ll (original)
+++ llvm/trunk/test/CodeGen/X86/wide-fma-contraction.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 ; CHECK-NOFMA-LABEL: fmafunc
 define <16 x float> @fmafunc(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-LABEL: fmafunc:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushl %ebp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    .cfi_offset %ebp, -8
@@ -21,7 +21,7 @@ define <16 x float> @fmafunc(<16 x float
 ; CHECK-NEXT:    retl
 ;
 ; CHECK-NOFMA-LABEL: fmafunc:
-; CHECK-NOFMA:       ## BB#0:
+; CHECK-NOFMA:       ## %bb.0:
 ; CHECK-NOFMA-NEXT:    pushl %ebp
 ; CHECK-NOFMA-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NOFMA-NEXT:    .cfi_offset %ebp, -8

Modified: llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll Mon Dec  4 09:18:51 2017
@@ -3,14 +3,14 @@
 
 define i32 @branch_eq(i64 %a, i64 %b) {
 ; CHECK-LABEL: branch_eq:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    xorl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    orl %ecx, %eax
 ; CHECK-NEXT:    jne .LBB0_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB0_2: # %bb2
@@ -27,13 +27,13 @@ bb2:
 
 define i32 @branch_slt(i64 %a, i64 %b) {
 ; CHECK-LABEL: branch_slt:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    jge .LBB1_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB1_2: # %bb2
@@ -50,13 +50,13 @@ bb2:
 
 define i32 @branch_ule(i64 %a, i64 %b) {
 ; CHECK-LABEL: branch_ule:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    jb .LBB2_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB2_2: # %bb2
@@ -73,7 +73,7 @@ bb2:
 
 define i32 @set_gt(i64 %a, i64 %b) {
 ; CHECK-LABEL: set_gt:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
@@ -89,7 +89,7 @@ entry:
 
 define i32 @test_wide(i128 %a, i128 %b) {
 ; CHECK-LABEL: test_wide:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    .cfi_offset %esi, -8
@@ -102,7 +102,7 @@ define i32 @test_wide(i128 %a, i128 %b)
 ; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    jge .LBB4_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    popl %esi
 ; CHECK-NEXT:    retl
@@ -123,11 +123,11 @@ bb2:
 ; sure the code can handle that.
 define i32 @test_carry_false(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_carry_false:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    jge .LBB5_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB5_2: # %bb2
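
The wide-integer-cmp hunks above also show the second place the spelling appears: a fall-through block gets no .LBB label, only a comment, which now reads "# %bb.1" instead of "# BB#1". A hypothetical reduced test in the same style (illustrative only, assuming the i686 triple used by the file above):

; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s

define i32 @pick(i32 %a, i32 %b) {
; CHECK-LABEL: pick:
; CHECK:       # %bb.0: # %entry
; CHECK:         jne .LBB0_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    retl
; CHECK-NEXT:  .LBB0_2: # %bb2
entry:
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %bb1, label %bb2
bb1:
  ret i32 1
bb2:
  ret i32 2
}

Blocks that are branch targets keep their .LBB<function>_<block> labels, so jump targets in the checks are untouched; only the comment-form block references change spelling.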

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-1.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @update(<3 x i8>* %dst, <3 x i8>* %src, i32 %n) nounwind {
 ; CHECK-LABEL: update:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    movl $0, (%esp)
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
@@ -26,7 +26,7 @@ define void @update(<3 x i8>* %dst, <3 x
 ; CHECK-NEXT:    movl (%esp), %eax
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    jl .LBB0_2
-; CHECK-NEXT:  # BB#3: # %afterfor
+; CHECK-NEXT:  # %bb.3: # %afterfor
 ; CHECK-NEXT:    addl $12, %esp
 ; CHECK-NEXT:    retl
 entry:

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-2.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
 ; CHECK-LABEL: update:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    movl $0, (%esp)
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
@@ -32,7 +32,7 @@ define void @update(i64* %dst_i, i64* %s
 ; CHECK-NEXT:    movl (%esp), %eax
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    jl .LBB0_2
-; CHECK-NEXT:  # BB#3: # %afterfor
+; CHECK-NEXT:  # %bb.3: # %afterfor
 ; CHECK-NEXT:    addl $12, %esp
 ; CHECK-NEXT:    retl
 entry:

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-3.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @update(<3 x i16>* %dst, <3 x i16>* %src, i32 %n) nounwind {
 ; CHECK-LABEL: update:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %ebp
 ; CHECK-NEXT:    movl %esp, %ebp
 ; CHECK-NEXT:    andl $-8, %esp
@@ -39,7 +39,7 @@ define void @update(<3 x i16>* %dst, <3
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cmpl 16(%ebp), %eax
 ; CHECK-NEXT:    jl .LBB0_2
-; CHECK-NEXT:  # BB#3: # %afterfor
+; CHECK-NEXT:  # %bb.3: # %afterfor
 ; CHECK-NEXT:    movl %ebp, %esp
 ; CHECK-NEXT:    popl %ebp
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-4.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @update(<5 x i16>* %dst, <5 x i16>* %src, i32 %n) nounwind {
 ; CHECK-LABEL: update:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
@@ -34,7 +34,7 @@ define void @update(<5 x i16>* %dst, <5
 ; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; CHECK-NEXT:    cmpl -{{[0-9]+}}(%rsp), %eax
 ; CHECK-NEXT:    jl .LBB0_2
-; CHECK-NEXT:  # BB#3: # %afterfor
+; CHECK-NEXT:  # %bb.3: # %afterfor
 ; CHECK-NEXT:    retq
 entry:
 	%dst.addr = alloca <5 x i16>*

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-5.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-5.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @update(<3 x i32>* %dst, <3 x i32>* %src, i32 %n) nounwind {
 ; CHECK-LABEL: update:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
@@ -33,7 +33,7 @@ define void @update(<3 x i32>* %dst, <3
 ; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; CHECK-NEXT:    cmpl -{{[0-9]+}}(%rsp), %eax
 ; CHECK-NEXT:    jl .LBB0_2
-; CHECK-NEXT:  # BB#3: # %afterfor
+; CHECK-NEXT:  # %bb.3: # %afterfor
 ; CHECK-NEXT:    retq
 entry:
 	%dst.addr = alloca <3 x i32>*

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-6.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-6.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @update(<3 x float>* %dst, <3 x float>* %src, i32 %n) nounwind {
 ; CHECK-LABEL: update:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %ebp
 ; CHECK-NEXT:    movl %esp, %ebp
 ; CHECK-NEXT:    andl $-16, %esp
@@ -35,7 +35,7 @@ define void @update(<3 x float>* %dst, <
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cmpl 16(%ebp), %eax
 ; CHECK-NEXT:    jl .LBB0_2
-; CHECK-NEXT:  # BB#3: # %afterfor
+; CHECK-NEXT:  # %bb.3: # %afterfor
 ; CHECK-NEXT:    movl %ebp, %esp
 ; CHECK-NEXT:    popl %ebp
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/widen_bitops-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_bitops-0.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_bitops-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_bitops-0.ll Mon Dec  4 09:18:51 2017
@@ -8,13 +8,13 @@
 
 define i24 @and_i24_as_v3i8(i24 %a, i24 %b) nounwind {
 ; X32-SSE-LABEL: and_i24_as_v3i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_i24_as_v3i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -27,13 +27,13 @@ define i24 @and_i24_as_v3i8(i24 %a, i24
 
 define i24 @xor_i24_as_v3i8(i24 %a, i24 %b) nounwind {
 ; X32-SSE-LABEL: xor_i24_as_v3i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_i24_as_v3i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -46,13 +46,13 @@ define i24 @xor_i24_as_v3i8(i24 %a, i24
 
 define i24 @or_i24_as_v3i8(i24 %a, i24 %b) nounwind {
 ; X32-SSE-LABEL: or_i24_as_v3i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_i24_as_v3i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -69,13 +69,13 @@ define i24 @or_i24_as_v3i8(i24 %a, i24 %
 
 define i24 @and_i24_as_v8i3(i24 %a, i24 %b) nounwind {
 ; X32-SSE-LABEL: and_i24_as_v8i3:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_i24_as_v8i3:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -88,13 +88,13 @@ define i24 @and_i24_as_v8i3(i24 %a, i24
 
 define i24 @xor_i24_as_v8i3(i24 %a, i24 %b) nounwind {
 ; X32-SSE-LABEL: xor_i24_as_v8i3:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_i24_as_v8i3:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -107,13 +107,13 @@ define i24 @xor_i24_as_v8i3(i24 %a, i24
 
 define i24 @or_i24_as_v8i3(i24 %a, i24 %b) nounwind {
 ; X32-SSE-LABEL: or_i24_as_v8i3:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_i24_as_v8i3:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -130,7 +130,7 @@ define i24 @or_i24_as_v8i3(i24 %a, i24 %
 
 define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
 ; X32-SSE-LABEL: and_v3i8_as_i24:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE-NEXT:    pinsrb $4, {{[0-9]+}}(%esp), %xmm0
 ; X32-SSE-NEXT:    pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -147,7 +147,7 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_v3i8_as_i24:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movd %ecx, %xmm0
 ; X64-SSE-NEXT:    pinsrd $1, %r8d, %xmm0
 ; X64-SSE-NEXT:    pinsrd $2, %r9d, %xmm0
@@ -171,7 +171,7 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8
 
 define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
 ; X32-SSE-LABEL: xor_v3i8_as_i24:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE-NEXT:    pinsrb $4, {{[0-9]+}}(%esp), %xmm0
 ; X32-SSE-NEXT:    pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -188,7 +188,7 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_v3i8_as_i24:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movd %ecx, %xmm0
 ; X64-SSE-NEXT:    pinsrd $1, %r8d, %xmm0
 ; X64-SSE-NEXT:    pinsrd $2, %r9d, %xmm0
@@ -212,7 +212,7 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8
 
 define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
 ; X32-SSE-LABEL: or_v3i8_as_i24:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE-NEXT:    pinsrb $4, {{[0-9]+}}(%esp), %xmm0
 ; X32-SSE-NEXT:    pinsrb $8, {{[0-9]+}}(%esp), %xmm0
@@ -229,7 +229,7 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8>
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_v3i8_as_i24:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movd %ecx, %xmm0
 ; X64-SSE-NEXT:    pinsrd $1, %r8d, %xmm0
 ; X64-SSE-NEXT:    pinsrd $2, %r9d, %xmm0
@@ -257,12 +257,12 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8>
 
 define <8 x i3> @and_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
 ; X32-SSE-LABEL: and_v8i3_as_i24:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    andps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_v8i3_as_i24:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <8 x i3> %a to i24
@@ -274,12 +274,12 @@ define <8 x i3> @and_v8i3_as_i24(<8 x i3
 
 define <8 x i3> @xor_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
 ; X32-SSE-LABEL: xor_v8i3_as_i24:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    xorps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_v8i3_as_i24:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <8 x i3> %a to i24
@@ -291,12 +291,12 @@ define <8 x i3> @xor_v8i3_as_i24(<8 x i3
 
 define <8 x i3> @or_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
 ; X32-SSE-LABEL: or_v8i3_as_i24:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    orps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_v8i3_as_i24:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <8 x i3> %a to i24

Modified: llvm/trunk/test/CodeGen/X86/widen_bitops-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_bitops-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_bitops-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_bitops-1.ll Mon Dec  4 09:18:51 2017
@@ -8,13 +8,13 @@
 
 define i32 @and_i32_as_v4i8(i32 %a, i32 %b) nounwind {
 ; X32-SSE-LABEL: and_i32_as_v4i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_i32_as_v4i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -27,13 +27,13 @@ define i32 @and_i32_as_v4i8(i32 %a, i32
 
 define i32 @xor_i32_as_v4i8(i32 %a, i32 %b) nounwind {
 ; X32-SSE-LABEL: xor_i32_as_v4i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_i32_as_v4i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -46,13 +46,13 @@ define i32 @xor_i32_as_v4i8(i32 %a, i32
 
 define i32 @or_i32_as_v4i8(i32 %a, i32 %b) nounwind {
 ; X32-SSE-LABEL: or_i32_as_v4i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_i32_as_v4i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -69,13 +69,13 @@ define i32 @or_i32_as_v4i8(i32 %a, i32 %
 
 define i32 @and_i32_as_v8i4(i32 %a, i32 %b) nounwind {
 ; X32-SSE-LABEL: and_i32_as_v8i4:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_i32_as_v8i4:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -88,13 +88,13 @@ define i32 @and_i32_as_v8i4(i32 %a, i32
 
 define i32 @xor_i32_as_v8i4(i32 %a, i32 %b) nounwind {
 ; X32-SSE-LABEL: xor_i32_as_v8i4:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_i32_as_v8i4:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -107,13 +107,13 @@ define i32 @xor_i32_as_v8i4(i32 %a, i32
 
 define i32 @or_i32_as_v8i4(i32 %a, i32 %b) nounwind {
 ; X32-SSE-LABEL: or_i32_as_v8i4:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_i32_as_v8i4:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orl %esi, %edi
 ; X64-SSE-NEXT:    movl %edi, %eax
 ; X64-SSE-NEXT:    retq
@@ -130,12 +130,12 @@ define i32 @or_i32_as_v8i4(i32 %a, i32 %
 
 define <4 x i8> @and_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
 ; X32-SSE-LABEL: and_v4i8_as_i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    andps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_v4i8_as_i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <4 x i8> %a to i32
@@ -147,12 +147,12 @@ define <4 x i8> @and_v4i8_as_i32(<4 x i8
 
 define <4 x i8> @xor_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
 ; X32-SSE-LABEL: xor_v4i8_as_i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    xorps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_v4i8_as_i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <4 x i8> %a to i32
@@ -164,12 +164,12 @@ define <4 x i8> @xor_v4i8_as_i32(<4 x i8
 
 define <4 x i8> @or_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
 ; X32-SSE-LABEL: or_v4i8_as_i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    orps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_v4i8_as_i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <4 x i8> %a to i32
@@ -185,12 +185,12 @@ define <4 x i8> @or_v4i8_as_i32(<4 x i8>
 
 define <8 x i4> @and_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
 ; X32-SSE-LABEL: and_v8i4_as_i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    andps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: and_v8i4_as_i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    andps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <8 x i4> %a to i32
@@ -202,12 +202,12 @@ define <8 x i4> @and_v8i4_as_i32(<8 x i4
 
 define <8 x i4> @xor_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
 ; X32-SSE-LABEL: xor_v8i4_as_i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    xorps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: xor_v8i4_as_i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    xorps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <8 x i4> %a to i32
@@ -219,12 +219,12 @@ define <8 x i4> @xor_v8i4_as_i32(<8 x i4
 
 define <8 x i4> @or_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
 ; X32-SSE-LABEL: or_v8i4_as_i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    orps %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: or_v8i4_as_i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    orps %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
   %1 = bitcast <8 x i4> %a to i32

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-1.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @convert(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
 ; CHECK-LABEL: convert:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %eax
 ; CHECK-NEXT:    movl $0, (%esp)
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
@@ -33,7 +33,7 @@ define void @convert(<2 x i32>* %dst, <4
 ; CHECK-NEXT:    retl
 ;
 ; ATOM-LABEL: convert:
-; ATOM:       # BB#0: # %entry
+; ATOM:       # %bb.0: # %entry
 ; ATOM-NEXT:    pushl %eax
 ; ATOM-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
 ; ATOM-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-2.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @convert(<7 x i32>* %dst, <14 x i16>* %src) nounwind {
 ; CHECK-LABEL: convert:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %eax
 ; CHECK-NEXT:    movl $0, (%esp)
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-3.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @convert(<12 x i8>* %dst.addr, <3 x i32> %src) nounwind {
 ; X86-LABEL: convert:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X86-NEXT:    psubd %xmm1, %xmm0
@@ -16,7 +16,7 @@ define void @convert(<12 x i8>* %dst.add
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-NEXT:    psubd %xmm1, %xmm0
 ; X64-NEXT:    pextrd $2, %xmm0, 8(%rdi)

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-4.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
 ; NARROW-LABEL: update:
-; NARROW:       # BB#0: # %entry
+; NARROW:       # %bb.0: # %entry
 ; NARROW-NEXT:    subl $12, %esp
 ; NARROW-NEXT:    movl $0, (%esp)
 ; NARROW-NEXT:    pcmpeqd %xmm0, %xmm0
@@ -35,12 +35,12 @@ define void @update(i64* %dst_i, i64* %s
 ; NARROW-NEXT:    movl (%esp), %eax
 ; NARROW-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; NARROW-NEXT:    jl .LBB0_2
-; NARROW-NEXT:  # BB#3: # %afterfor
+; NARROW-NEXT:  # %bb.3: # %afterfor
 ; NARROW-NEXT:    addl $12, %esp
 ; NARROW-NEXT:    retl
 ;
 ; WIDE-LABEL: update:
-; WIDE:       # BB#0: # %entry
+; WIDE:       # %bb.0: # %entry
 ; WIDE-NEXT:    subl $12, %esp
 ; WIDE-NEXT:    movl $0, (%esp)
 ; WIDE-NEXT:    pcmpeqd %xmm0, %xmm0
@@ -72,7 +72,7 @@ define void @update(i64* %dst_i, i64* %s
 ; WIDE-NEXT:    movl (%esp), %eax
 ; WIDE-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; WIDE-NEXT:    jl .LBB0_2
-; WIDE-NEXT:  # BB#3: # %afterfor
+; WIDE-NEXT:  # %bb.3: # %afterfor
 ; WIDE-NEXT:    addl $12, %esp
 ; WIDE-NEXT:    retl
 entry:

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-5.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-5.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @convert(<2 x i32>* %dst.addr, i64 %src) nounwind {
 ; X86-LABEL: convert:
-; X86:       ## BB#0: ## %entry
+; X86:       ## %bb.0: ## %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; X86-NEXT:    pxor LCPI0_0, %xmm0
@@ -15,7 +15,7 @@ define void @convert(<2 x i32>* %dst.add
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movq %rsi, %xmm0
 ; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; X64-NEXT:    pxor {{.*}}(%rip), %xmm0
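
widen_cast-5.ll targets Darwin, where the assembler comment string is "##" rather than "#", so the very same marker is spelled "## %bb.0" in those checks. A hypothetical minimal Darwin-triple test (function and output are illustrative assumptions):

; RUN: llc -mtriple=x86_64-apple-darwin < %s | FileCheck %s

define i64 @ident(i64 %x) {
; CHECK-LABEL: ident:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    retq
entry:
  ret i64 %x
}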

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-6.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-6.ll Mon Dec  4 09:18:51 2017
@@ -6,14 +6,14 @@
 
 define i32 @return_v2hi() nounwind {
 ; X86-LABEL: return_v2hi:
-; X86:       ## BB#0: ## %entry
+; X86:       ## %bb.0: ## %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    popl %ecx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: return_v2hi:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/widen_compare-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_compare-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_compare-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_compare-1.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <2 x i16> @compare_v2i64_to_v2i16(<2 x i16>* %src) nounwind {
 ; X86-LABEL: compare_v2i64_to_v2i16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: compare_v2i64_to_v2i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-NEXT:    retq
   %val = load <2 x i16>, <2 x i16>* %src, align 4

Modified: llvm/trunk/test/CodeGen/X86/widen_conv-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conv-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conv-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conv-1.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @convert_v2i64_to_v2i32(<2 x i32>* %dst.addr, <2 x i64> %src) nounwind {
 ; X86-LABEL: convert_v2i64_to_v2i32:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -14,7 +14,7 @@ define void @convert_v2i64_to_v2i32(<2 x
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert_v2i64_to_v2i32:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT:    movq %xmm0, (%rdi)
@@ -30,7 +30,7 @@ entry:
 
 define void @convert_v3i32_to_v3i8(<3 x i8>* %dst.addr, <3 x i32>* %src.addr) nounwind {
 ; X86-LABEL: convert_v3i32_to_v3i8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -44,7 +44,7 @@ define void @convert_v3i32_to_v3i8(<3 x
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert_v3i32_to_v3i8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-NEXT:    psubd %xmm1, %xmm0
@@ -64,7 +64,7 @@ entry:
 
 define void @convert_v5i16_to_v5i8(<5 x i8>* %dst.addr, <5 x i16>* %src.addr) nounwind {
 ; X86-LABEL: convert_v5i16_to_v5i8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -82,7 +82,7 @@ define void @convert_v5i16_to_v5i8(<5 x
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert_v5i16_to_v5i8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-NEXT:    psubw %xmm1, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/widen_conv-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conv-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conv-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conv-2.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @convert_v2i16_v2i32(<2 x i32>* %dst.addr, <2 x i16> %src) nounwind {
 ; X86-LABEL: convert_v2i16_v2i32:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    psllq $48, %xmm0
 ; X86-NEXT:    psrad $16, %xmm0
@@ -15,7 +15,7 @@ define void @convert_v2i16_v2i32(<2 x i3
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert_v2i16_v2i32:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psllq $48, %xmm0
 ; X64-NEXT:    psrad $16, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]

Modified: llvm/trunk/test/CodeGen/X86/widen_conv-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conv-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conv-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conv-3.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @convert_v2i16_to_v2f32(<2 x float>* %dst.addr, <2 x i16> %src) nounwind {
 ; X86-SSE2-LABEL: convert_v2i16_to_v2f32:
-; X86-SSE2:       # BB#0: # %entry
+; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    psllq $48, %xmm0
 ; X86-SSE2-NEXT:    psrad $16, %xmm0
@@ -20,7 +20,7 @@ define void @convert_v2i16_to_v2f32(<2 x
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: convert_v2i16_to_v2f32:
-; X86-SSE42:       # BB#0: # %entry
+; X86-SSE42:       # %bb.0: # %entry
 ; X86-SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE42-NEXT:    psllq $48, %xmm0
 ; X86-SSE42-NEXT:    psrad $16, %xmm0
@@ -31,7 +31,7 @@ define void @convert_v2i16_to_v2f32(<2 x
 ; X86-SSE42-NEXT:    retl
 ;
 ; X64-LABEL: convert_v2i16_to_v2f32:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psllq $48, %xmm0
 ; X64-NEXT:    psrad $16, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -48,7 +48,7 @@ entry:
 
 define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr) nounwind {
 ; X86-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE2:       # BB#0: # %entry
+; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    pushl %ebp
 ; X86-SSE2-NEXT:    movl %esp, %ebp
 ; X86-SSE2-NEXT:    pushl %esi
@@ -84,7 +84,7 @@ define void @convert_v3i8_to_v3f32(<3 x
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE42:       # BB#0: # %entry
+; X86-SSE42:       # %bb.0: # %entry
 ; X86-SSE42-NEXT:    pushl %eax
 ; X86-SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE42-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -103,7 +103,7 @@ define void @convert_v3i8_to_v3f32(<3 x
 ; X86-SSE42-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE2:       # BB#0: # %entry
+; X64-SSE2:       # %bb.0: # %entry
 ; X64-SSE2-NEXT:    movzwl (%rsi), %eax
 ; X64-SSE2-NEXT:    movq %rax, %xmm0
 ; X64-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -126,7 +126,7 @@ define void @convert_v3i8_to_v3f32(<3 x
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE42:       # BB#0: # %entry
+; X64-SSE42:       # %bb.0: # %entry
 ; X64-SSE42-NEXT:    movzbl 2(%rsi), %eax
 ; X64-SSE42-NEXT:    movzwl (%rsi), %ecx
 ; X64-SSE42-NEXT:    movq %rcx, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/widen_conv-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conv-4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conv-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conv-4.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwind {
 ; X86-SSE2-LABEL: convert_v7i16_v7f32:
-; X86-SSE2:       # BB#0: # %entry
+; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -26,7 +26,7 @@ define void @convert_v7i16_v7f32(<7 x fl
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: convert_v7i16_v7f32:
-; X86-SSE42:       # BB#0: # %entry
+; X86-SSE42:       # %bb.0: # %entry
 ; X86-SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm1
 ; X86-SSE42-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -40,7 +40,7 @@ define void @convert_v7i16_v7f32(<7 x fl
 ; X86-SSE42-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: convert_v7i16_v7f32:
-; X64-SSE2:       # BB#0: # %entry
+; X64-SSE2:       # %bb.0: # %entry
 ; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -54,7 +54,7 @@ define void @convert_v7i16_v7f32(<7 x fl
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: convert_v7i16_v7f32:
-; X64-SSE42:       # BB#0: # %entry
+; X64-SSE42:       # %bb.0: # %entry
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm1
 ; X64-SSE42-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; X64-SSE42-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -74,7 +74,7 @@ entry:
 
 define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr) nounwind {
 ; X86-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE2:       # BB#0: # %entry
+; X86-SSE2:       # %bb.0: # %entry
 ; X86-SSE2-NEXT:    pushl %ebp
 ; X86-SSE2-NEXT:    movl %esp, %ebp
 ; X86-SSE2-NEXT:    pushl %esi
@@ -110,7 +110,7 @@ define void @convert_v3i8_to_v3f32(<3 x
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X86-SSE42:       # BB#0: # %entry
+; X86-SSE42:       # %bb.0: # %entry
 ; X86-SSE42-NEXT:    pushl %eax
 ; X86-SSE42-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE42-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -128,7 +128,7 @@ define void @convert_v3i8_to_v3f32(<3 x
 ; X86-SSE42-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE2:       # BB#0: # %entry
+; X64-SSE2:       # %bb.0: # %entry
 ; X64-SSE2-NEXT:    movzwl (%rsi), %eax
 ; X64-SSE2-NEXT:    movq %rax, %xmm0
 ; X64-SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -151,7 +151,7 @@ define void @convert_v3i8_to_v3f32(<3 x
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: convert_v3i8_to_v3f32:
-; X64-SSE42:       # BB#0: # %entry
+; X64-SSE42:       # %bb.0: # %entry
 ; X64-SSE42-NEXT:    movzbl 2(%rsi), %eax
 ; X64-SSE42-NEXT:    movzwl (%rsi), %ecx
 ; X64-SSE42-NEXT:    movq %rcx, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/widen_conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conversions.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conversions.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
 ; X86-LABEL: zext_v4i8_to_v4i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-NEXT:    pxor %xmm1, %xmm1
@@ -13,7 +13,7 @@ define <4 x i32> @zext_v4i8_to_v4i32(<4
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: zext_v4i8_to_v4i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    pxor %xmm1, %xmm1
 ; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]

Modified: llvm/trunk/test/CodeGen/X86/widen_extract-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_extract-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_extract-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_extract-1.ll Mon Dec  4 09:18:51 2017
@@ -6,14 +6,14 @@
 
 define void @convert(<2 x double>* %dst.addr, <3 x double> %src)  {
 ; X32-LABEL: convert:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: convert:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/widen_load-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-0.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-0.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
 ; X86-LABEL: short2_int_swap:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -20,7 +20,7 @@ define void @short2_int_swap(<2 x i16>*
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: short2_int_swap:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl (%rsi), %eax
 ; X64-NEXT:    movl (%rdi), %ecx
 ; X64-NEXT:    movl %eax, (%rdi)

Modified: llvm/trunk/test/CodeGen/X86/widen_load-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-2.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 %i32vec3 = type <3 x i32>
 define void @add3i32(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
 ; X86-LABEL: add3i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -20,7 +20,7 @@ define void @add3i32(%i32vec3*  sret %re
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add3i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    paddd (%rdx), %xmm0
 ; X64-NEXT:    pextrd $2, %xmm0, 8(%rdi)
@@ -36,7 +36,7 @@ define void @add3i32(%i32vec3*  sret %re
 
 define void @add3i32_2(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
 ; X86-LABEL: add3i32_2:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -53,7 +53,7 @@ define void @add3i32_2(%i32vec3*  sret %
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add3i32_2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    pinsrd $2, 8(%rsi), %xmm0
 ; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
@@ -73,7 +73,7 @@ define void @add3i32_2(%i32vec3*  sret %
 %i32vec7 = type <7 x i32>
 define void @add7i32(%i32vec7*  sret %ret, %i32vec7* %ap, %i32vec7* %bp)  {
 ; X86-LABEL: add7i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -88,7 +88,7 @@ define void @add7i32(%i32vec7*  sret %re
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add7i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    movdqa 16(%rsi), %xmm1
 ; X64-NEXT:    paddd (%rdx), %xmm0
@@ -108,7 +108,7 @@ define void @add7i32(%i32vec7*  sret %re
 %i32vec12 = type <12 x i32>
 define void @add12i32(%i32vec12*  sret %ret, %i32vec12* %ap, %i32vec12* %bp)  {
 ; X86-LABEL: add12i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -124,7 +124,7 @@ define void @add12i32(%i32vec12*  sret %
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add12i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    movdqa 16(%rsi), %xmm1
 ; X64-NEXT:    movdqa 32(%rsi), %xmm2
@@ -147,7 +147,7 @@ define void @add12i32(%i32vec12*  sret %
 %i16vec3 = type <3 x i16>
 define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
 ; X86-LABEL: add3i16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -170,7 +170,7 @@ define void @add3i16(%i16vec3* nocapture
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add3i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; X64-NEXT:    pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; X64-NEXT:    paddd %xmm0, %xmm1
@@ -189,7 +189,7 @@ define void @add3i16(%i16vec3* nocapture
 %i16vec4 = type <4 x i16>
 define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
 ; X86-LABEL: add4i16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -200,7 +200,7 @@ define void @add4i16(%i16vec4* nocapture
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add4i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X64-NEXT:    paddw %xmm0, %xmm1
@@ -217,7 +217,7 @@ define void @add4i16(%i16vec4* nocapture
 %i16vec12 = type <12 x i16>
 define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
 ; X86-LABEL: add12i16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -231,7 +231,7 @@ define void @add12i16(%i16vec12* nocaptu
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add12i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    movdqa 16(%rsi), %xmm1
 ; X64-NEXT:    paddw (%rdx), %xmm0
@@ -250,7 +250,7 @@ define void @add12i16(%i16vec12* nocaptu
 %i16vec18 = type <18 x i16>
 define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
 ; X86-LABEL: add18i16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -266,7 +266,7 @@ define void @add18i16(%i16vec18* nocaptu
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add18i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    movdqa 16(%rsi), %xmm1
 ; X64-NEXT:    movdqa 32(%rsi), %xmm2
@@ -289,7 +289,7 @@ define void @add18i16(%i16vec18* nocaptu
 %i8vec3 = type <3 x i8>
 define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
 ; X86-LABEL: add3i8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    subl $12, %esp
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -304,7 +304,7 @@ define void @add3i8(%i8vec3* nocapture s
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add3i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; X64-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; X64-NEXT:    paddd %xmm0, %xmm1
@@ -323,7 +323,7 @@ define void @add3i8(%i8vec3* nocapture s
 %i8vec31 = type <31 x i8>
 define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
 ; X86-LABEL: add31i8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -340,7 +340,7 @@ define void @add31i8(%i8vec31* nocapture
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: add31i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rsi), %xmm0
 ; X64-NEXT:    movdqa 16(%rsi), %xmm1
 ; X64-NEXT:    paddb (%rdx), %xmm0
@@ -363,7 +363,7 @@ define void @add31i8(%i8vec31* nocapture
 %i8vec3pack = type { <3 x i8>, i8 }
 define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pack* %rot) nounwind {
 ; X86-LABEL: rot:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    subl $16, %esp
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -385,7 +385,7 @@ define void @rot(%i8vec3pack* nocapture
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: rot:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movdqa {{.*#+}} xmm0 = [40606,158]
 ; X64-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; X64-NEXT:    movb $-98, 2(%rsi)

Modified: llvm/trunk/test/CodeGen/X86/widen_load-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-3.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define <7 x i64> @load7_aligned(<7 x i64>* %x) {
 ; X86-SSE-LABEL: load7_aligned:
-; X86-SSE:       # BB#0:
+; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    movaps (%ecx), %xmm0
@@ -26,7 +26,7 @@ define <7 x i64> @load7_aligned(<7 x i64
 ; X86-SSE-NEXT:    retl $4
 ;
 ; X86-AVX-LABEL: load7_aligned:
-; X86-AVX:       # BB#0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    vmovaps (%ecx), %ymm0
@@ -40,7 +40,7 @@ define <7 x i64> @load7_aligned(<7 x i64
 ; X86-AVX-NEXT:    retl $4
 ;
 ; X64-SSE-LABEL: load7_aligned:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movaps (%rsi), %xmm0
 ; X64-SSE-NEXT:    movaps 16(%rsi), %xmm1
 ; X64-SSE-NEXT:    movaps 32(%rsi), %xmm2
@@ -53,7 +53,7 @@ define <7 x i64> @load7_aligned(<7 x i64
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: load7_aligned:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovaps (%rsi), %ymm0
 ; X64-AVX-NEXT:    vmovaps 32(%rsi), %ymm1
 ; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
@@ -69,7 +69,7 @@ define <7 x i64> @load7_aligned(<7 x i64
 
 define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
 ; X86-SSE-LABEL: load7_unaligned:
-; X86-SSE:       # BB#0:
+; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    movups (%ecx), %xmm0
@@ -85,7 +85,7 @@ define <7 x i64> @load7_unaligned(<7 x i
 ; X86-SSE-NEXT:    retl $4
 ;
 ; X86-AVX-LABEL: load7_unaligned:
-; X86-AVX:       # BB#0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    vmovups (%ecx), %ymm0
@@ -100,7 +100,7 @@ define <7 x i64> @load7_unaligned(<7 x i
 ; X86-AVX-NEXT:    retl $4
 ;
 ; X64-SSE-LABEL: load7_unaligned:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movups (%rsi), %xmm0
 ; X64-SSE-NEXT:    movups 16(%rsi), %xmm1
 ; X64-SSE-NEXT:    movups 32(%rsi), %xmm2
@@ -113,7 +113,7 @@ define <7 x i64> @load7_unaligned(<7 x i
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: load7_unaligned:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovups (%rsi), %ymm0
 ; X64-AVX-NEXT:    vmovups 32(%rsi), %xmm1
 ; X64-AVX-NEXT:    movq 48(%rsi), %rax

Modified: llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; widening shuffle v3float and then a add
 define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
 ; X86-LABEL: shuf:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    addps %xmm1, %xmm0
 ; X86-NEXT:    extractps $2, %xmm0, 8(%eax)
@@ -14,7 +14,7 @@ define void @shuf(<3 x float>* %dst.addr
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shuf:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    addps %xmm1, %xmm0
 ; X64-NEXT:    extractps $2, %xmm0, 8(%rdi)
 ; X64-NEXT:    movlps %xmm0, (%rdi)
@@ -30,7 +30,7 @@ entry:
 ; widening shuffle v3float with a different mask and then a add
 define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
 ; X86-LABEL: shuf2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
 ; X86-NEXT:    addps %xmm1, %xmm0
@@ -40,7 +40,7 @@ define void @shuf2(<3 x float>* %dst.add
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shuf2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
 ; X64-NEXT:    addps %xmm1, %xmm0
 ; X64-NEXT:    extractps $2, %xmm0, 8(%rdi)
@@ -58,14 +58,14 @@ entry:
 ; opA with opB, the DAG will produce new operations with opA.
 define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
 ; X86-LABEL: shuf3:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X86-NEXT:    movaps %xmm1, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shuf3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X64-NEXT:    movaps %xmm1, (%rdi)
 ; X64-NEXT:    retq
@@ -88,7 +88,7 @@ entry:
 ; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
 define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
 ; X86-LABEL: shuf4:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; X86-NEXT:    pshufb %xmm2, %xmm1
 ; X86-NEXT:    pshufb %xmm2, %xmm0
@@ -96,7 +96,7 @@ define <8 x i8> @shuf4(<4 x i8> %a, <4 x
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shuf4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; X64-NEXT:    pshufb %xmm2, %xmm1
 ; X64-NEXT:    pshufb %xmm2, %xmm0
@@ -109,14 +109,14 @@ define <8 x i8> @shuf4(<4 x i8> %a, <4 x
 ; PR11389: another CONCAT_VECTORS case
 define void @shuf5(<8 x i8>* %p) nounwind {
 ; X86-LABEL: shuf5:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-NEXT:    movsd %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shuf5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movq %rax, (%rdi)
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/widened-broadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widened-broadcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widened-broadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widened-broadcast.ll Mon Dec  4 09:18:51 2017
@@ -9,18 +9,18 @@
 
 define <4 x float> @load_splat_4f32_4f32_0101(<4 x float>* %ptr) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: load_splat_4f32_4f32_0101:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: load_splat_4f32_4f32_0101:
-; SSE42:       # BB#0: # %entry
+; SSE42:       # %bb.0: # %entry
 ; SSE42-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: load_splat_4f32_4f32_0101:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX-NEXT:    retq
 entry:
@@ -31,31 +31,31 @@ entry:
 
 define <8 x float> @load_splat_8f32_4f32_01010101(<4 x float>* %ptr) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: load_splat_8f32_4f32_01010101:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: load_splat_8f32_4f32_01010101:
-; SSE42:       # BB#0: # %entry
+; SSE42:       # %bb.0: # %entry
 ; SSE42-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
 ; SSE42-NEXT:    movapd %xmm0, %xmm1
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_8f32_4f32_01010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_8f32_4f32_01010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_8f32_4f32_01010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -66,20 +66,20 @@ entry:
 
 define <8 x float> @load_splat_8f32_8f32_01010101(<8 x float>* %ptr) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: load_splat_8f32_8f32_01010101:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: load_splat_8f32_8f32_01010101:
-; SSE42:       # BB#0: # %entry
+; SSE42:       # %bb.0: # %entry
 ; SSE42-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
 ; SSE42-NEXT:    movapd %xmm0, %xmm1
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: load_splat_8f32_8f32_01010101:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -90,22 +90,22 @@ entry:
 
 define <4 x i32> @load_splat_4i32_4i32_0101(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_4i32_4i32_0101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_4i32_4i32_0101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_4i32_4i32_0101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq (%rdi), %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_4i32_4i32_0101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpbroadcastq (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -116,24 +116,24 @@ entry:
 
 define <8 x i32> @load_splat_8i32_4i32_01010101(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_8i32_4i32_01010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_8i32_4i32_01010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_8i32_4i32_01010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_8i32_4i32_01010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -144,24 +144,24 @@ entry:
 
 define <8 x i32> @load_splat_8i32_8i32_01010101(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_8i32_8i32_01010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_8i32_8i32_01010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_8i32_8i32_01010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_8i32_8i32_01010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -172,22 +172,22 @@ entry:
 
 define <8 x i16> @load_splat_8i16_8i16_01010101(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_8i16_8i16_01010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_8i16_8i16_01010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_8i16_8i16_01010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastss (%rdi), %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_8i16_8i16_01010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastss (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -198,22 +198,22 @@ entry:
 
 define <8 x i16> @load_splat_8i16_8i16_01230123(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_8i16_8i16_01230123:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_8i16_8i16_01230123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_8i16_8i16_01230123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq (%rdi), %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_8i16_8i16_01230123:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpbroadcastq (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -224,24 +224,24 @@ entry:
 
 define <16 x i16> @load_splat_16i16_8i16_0101010101010101(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i16_8i16_0101010101010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_16i16_8i16_0101010101010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_16i16_8i16_0101010101010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_16i16_8i16_0101010101010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -252,24 +252,24 @@ entry:
 
 define <16 x i16> @load_splat_16i16_8i16_0123012301230123(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i16_8i16_0123012301230123:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_16i16_8i16_0123012301230123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_16i16_8i16_0123012301230123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_16i16_8i16_0123012301230123:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -280,24 +280,24 @@ entry:
 
 define <16 x i16> @load_splat_16i16_16i16_0101010101010101(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i16_16i16_0101010101010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_16i16_16i16_0101010101010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_16i16_16i16_0101010101010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_16i16_16i16_0101010101010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -308,13 +308,13 @@ entry:
 
 define <16 x i16> @load_splat_16i16_16i16_0123012301230123(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i16_16i16_0123012301230123:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: load_splat_16i16_16i16_0123012301230123:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -325,24 +325,24 @@ entry:
 
 define <16 x i8> @load_splat_16i8_16i8_0101010101010101(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i8_16i8_0101010101010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_16i8_16i8_0101010101010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_16i8_16i8_0101010101010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastw (%rdi), %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_16i8_16i8_0101010101010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpbroadcastw (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -353,22 +353,22 @@ entry:
 
 define <16 x i8> @load_splat_16i8_16i8_0123012301230123(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i8_16i8_0123012301230123:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_16i8_16i8_0123012301230123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_16i8_16i8_0123012301230123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastss (%rdi), %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_16i8_16i8_0123012301230123:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastss (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -379,22 +379,22 @@ entry:
 
 define <16 x i8> @load_splat_16i8_16i8_0123456701234567(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_16i8_16i8_0123456701234567:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_16i8_16i8_0123456701234567:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_16i8_16i8_0123456701234567:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastq (%rdi), %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_16i8_16i8_0123456701234567:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpbroadcastq (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -405,26 +405,26 @@ entry:
 
 define <32 x i8> @load_splat_32i8_16i8_01010101010101010101010101010101(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastw (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpbroadcastw (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -435,24 +435,24 @@ entry:
 
 define <32 x i8> @load_splat_32i8_16i8_01230123012301230123012301230123(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -463,24 +463,24 @@ entry:
 
 define <32 x i8> @load_splat_32i8_16i8_01234567012345670123456701234567(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -491,26 +491,26 @@ entry:
 
 define <32 x i8> @load_splat_32i8_32i8_01010101010101010101010101010101(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = mem[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpbroadcastw (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_32i8_32i8_01010101010101010101010101010101:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpbroadcastw (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -521,24 +521,24 @@ entry:
 
 define <32 x i8> @load_splat_32i8_32i8_01230123012301230123012301230123(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,0,0,0]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vbroadcastss (%rdi), %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-NEXT:    vbroadcastss %xmm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_splat_32i8_32i8_01230123012301230123012301230123:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512-NEXT:    vbroadcastss %xmm0, %ymm0
 ; AVX512-NEXT:    retq
@@ -550,13 +550,13 @@ entry:
 
 define <32 x i8> @load_splat_32i8_32i8_01234567012345670123456701234567(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_32i8_32i8_01234567012345670123456701234567:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: load_splat_32i8_32i8_01234567012345670123456701234567:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -567,13 +567,13 @@ entry:
 
 define <4 x float> @load_splat_4f32_8f32_0000(<8 x float>* %ptr) nounwind uwtable readnone ssp {
 ; SSE-LABEL: load_splat_4f32_8f32_0000:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: load_splat_4f32_8f32_0000:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -584,20 +584,20 @@ entry:
 
 define <8 x float> @load_splat_8f32_16f32_89898989(<16 x float>* %ptr) nounwind uwtable readnone ssp {
 ; SSE2-LABEL: load_splat_8f32_16f32_89898989:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps 32(%rdi), %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: load_splat_8f32_16f32_89898989:
-; SSE42:       # BB#0: # %entry
+; SSE42:       # %bb.0: # %entry
 ; SSE42-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
 ; SSE42-NEXT:    movapd %xmm0, %xmm1
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: load_splat_8f32_16f32_89898989:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vbroadcastsd 32(%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/win64_sibcall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/win64_sibcall.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/win64_sibcall.ll (original)
+++ llvm/trunk/test/CodeGen/X86/win64_sibcall.ll Mon Dec  4 09:18:51 2017
@@ -6,9 +6,9 @@
 define void @C1(%Object addrspace(1)* %param0) gc "coreclr" {
 entry:
 
-; WIN_X64: # BB#0:
+; WIN_X64: # %bb.0:
 ; WIN_X64:	pushq	%rax
-; LINUX:   # BB#0:                                 # %entry
+; LINUX:   # %bb.0:                                 # %entry
 ; LINUX:	movq	$0, -8(%rsp)
 
   %this = alloca %Object addrspace(1)*

Modified: llvm/trunk/test/CodeGen/X86/win_chkstk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/win_chkstk.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/win_chkstk.ll (original)
+++ llvm/trunk/test/CodeGen/X86/win_chkstk.ll Mon Dec  4 09:18:51 2017
@@ -30,19 +30,19 @@ entry:
 ; allocation.
 define i32 @main128() nounwind {
 entry:
-; WIN_X32:       # BB#0:
+; WIN_X32:       # %bb.0:
 ; WIN_X32-NOT:   calll __chkstk
 ; WIN_X32:       ret
 
-; WIN_X64:       # BB#0:
+; WIN_X64:       # %bb.0:
 ; WIN_X64-NOT:   callq __chkstk
 ; WIN_X64:       ret
 
-; MINGW_X64:     # BB#0:
+; MINGW_X64:     # %bb.0:
 ; MINGW_X64-NOT: callq ___chkstk_ms
 ; MINGW_X64:     ret
 
-; LINUX:         # BB#0:
+; LINUX:         # %bb.0:
 ; LINUX-NOT:     call __chkstk
 ; LINUX:         ret
   %array128 = alloca [128 x i8], align 16         ; <[128 x i8]*> [#uses=0]

Modified: llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll (original)
+++ llvm/trunk/test/CodeGen/X86/win_coreclr_chkstk.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 define i32 @main4k() nounwind {
 entry:
 ; WIN_X64-LABEL:main4k:
-; WIN_X64: # BB#0:
+; WIN_X64: # %bb.0:
 ; WIN_X64:      movl    $4096, %eax
 ; WIN_X64:      movq    %rcx, 8(%rsp)
 ; WIN_X64:	movq	%rdx, 16(%rsp)
@@ -19,7 +19,7 @@ entry:
 ; WIN_X64:	movq	%gs:16, %rcx
 ; WIN_X64:	cmpq	%rcx, %rdx
 ; WIN_X64:	jae	.LBB0_3
-; WIN_X64:# BB#1:
+; WIN_X64:# %bb.1:
 ; WIN_X64:	andq	$-4096, %rdx
 ; WIN_X64:.LBB0_2:
 ; WIN_X64:	leaq	-4096(%rcx), %rcx

Modified: llvm/trunk/test/CodeGen/X86/x32-cet-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x32-cet-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x32-cet-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x32-cet-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @test_incsspd(i32 %a) local_unnamed_addr {
 ; CHECK-LABEL: test_incsspd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    incsspd %eax
 ; CHECK-NEXT:    retl
@@ -16,7 +16,7 @@ declare void @llvm.x86.incsspd(i32)
 
 define i32 @test_rdsspd(i32 %a) {
 ; CHECK-LABEL: test_rdsspd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    rdsspd %eax
 ; CHECK-NEXT:    retl
@@ -29,7 +29,7 @@ declare i32 @llvm.x86.rdsspd(i32)
 
 define void @test_saveprevssp() {
 ; CHECK-LABEL: test_saveprevssp:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    saveprevssp
 ; CHECK-NEXT:    retl
 entry:
@@ -41,7 +41,7 @@ declare void @llvm.x86.saveprevssp()
 
 define void @test_rstorssp(i8* %__p) {
 ; CHECK-LABEL: test_rstorssp:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    rstorssp (%eax)
 ; CHECK-NEXT:    retl
@@ -54,7 +54,7 @@ declare void @llvm.x86.rstorssp(i8*)
 
 define void @test_wrssd(i32 %a, i8* %__p) {
 ; CHECK-LABEL: test_wrssd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    wrssd %eax, (%ecx)
@@ -68,7 +68,7 @@ declare void @llvm.x86.wrssd(i32, i8*)
 
 define void @test_wrussd(i32 %a, i8* %__p) {
 ; CHECK-LABEL: test_wrussd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    wrussd %eax, (%ecx)
@@ -82,7 +82,7 @@ declare void @llvm.x86.wrussd(i32, i8*)
 
 define void @test_setssbsy() {
 ; CHECK-LABEL: test_setssbsy:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    setssbsy
 ; CHECK-NEXT:    retl
 entry:
@@ -94,7 +94,7 @@ declare void @llvm.x86.setssbsy()
 
 define void @test_clrssbsy(i8* %__p) {
 ; CHECK-LABEL: test_clrssbsy:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    clrssbsy (%eax)
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/x32-lea-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x32-lea-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x32-lea-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x32-lea-1.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @foo(i32** %p) {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    leal -{{[0-9]+}}(%rsp), %eax
 ; CHECK-NEXT:    addl $16, %eax
 ; CHECK-NEXT:    movl %eax, (%edi)

Modified: llvm/trunk/test/CodeGen/X86/x64-cet-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x64-cet-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x64-cet-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x64-cet-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @test_incsspd(i32 %a) local_unnamed_addr {
 ; CHECK-LABEL: test_incsspd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    incsspd %edi
 ; CHECK-NEXT:    retq
 entry:
@@ -15,7 +15,7 @@ declare void @llvm.x86.incsspd(i32)
 
 define void @test_incsspq(i32 %a) local_unnamed_addr {
 ; CHECK-LABEL: test_incsspq:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    incsspq %rax
 ; CHECK-NEXT:    retq
@@ -29,7 +29,7 @@ declare void @llvm.x86.incsspq(i64)
 
 define i32 @test_rdsspd(i32 %a) {
 ; CHECK-LABEL: test_rdsspd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    rdsspd %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -42,7 +42,7 @@ declare i32 @llvm.x86.rdsspd(i32)
 
 define i64 @test_rdsspq(i64 %a) {
 ; CHECK-LABEL: test_rdsspq:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    rdsspq %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq
@@ -55,7 +55,7 @@ declare i64 @llvm.x86.rdsspq(i64)
 
 define void @test_saveprevssp() {
 ; CHECK-LABEL: test_saveprevssp:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    saveprevssp
 ; CHECK-NEXT:    retq
 entry:
@@ -67,7 +67,7 @@ declare void @llvm.x86.saveprevssp()
 
 define void @test_rstorssp(i8* %__p) {
 ; CHECK-LABEL: test_rstorssp:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    rstorssp (%rdi)
 ; CHECK-NEXT:    retq
 entry:
@@ -79,7 +79,7 @@ declare void @llvm.x86.rstorssp(i8*)
 
 define void @test_wrssd(i32 %a, i8* %__p) {
 ; CHECK-LABEL: test_wrssd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    wrssd %edi, (%rsi)
 ; CHECK-NEXT:    retq
 entry:
@@ -91,7 +91,7 @@ declare void @llvm.x86.wrssd(i32, i8*)
 
 define void @test_wrssq(i64 %a, i8* %__p) {
 ; CHECK-LABEL: test_wrssq:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    wrssq %rdi, (%rsi)
 ; CHECK-NEXT:    retq
 entry:
@@ -103,7 +103,7 @@ declare void @llvm.x86.wrssq(i64, i8*)
 
 define void @test_wrussd(i32 %a, i8* %__p) {
 ; CHECK-LABEL: test_wrussd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    wrussd %edi, (%rsi)
 ; CHECK-NEXT:    retq
 entry:
@@ -115,7 +115,7 @@ declare void @llvm.x86.wrussd(i32, i8*)
 
 define void @test_wrussq(i64 %a, i8* %__p) {
 ; CHECK-LABEL: test_wrussq:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    wrussq %rdi, (%rsi)
 ; CHECK-NEXT:    retq
 entry:
@@ -127,7 +127,7 @@ declare void @llvm.x86.wrussq(i64, i8*)
 
 define void @test_setssbsy() {
 ; CHECK-LABEL: test_setssbsy:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    setssbsy
 ; CHECK-NEXT:    retq
 entry:
@@ -139,7 +139,7 @@ declare void @llvm.x86.setssbsy()
 
 define void @test_clrssbsy(i8* %__p) {
 ; CHECK-LABEL: test_clrssbsy:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    clrssbsy (%rdi)
 ; CHECK-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/x86-64-intrcc-nosse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-64-intrcc-nosse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-64-intrcc-nosse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-64-intrcc-nosse.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 ; Clobbered SSE must not be saved when the target doesn't support SSE
 define x86_intrcc void @test_isr_sse_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) {
   ; CHECK-LABEL: test_isr_sse_clobbers:
-  ; CHECK:       # BB#0:
+  ; CHECK:       # %bb.0:
   ; CHECK-NEXT:    pushq %rax
   ; CHECK-NEXT:    cld
   ; CHECK-NEXT:    #APP

Modified: llvm/trunk/test/CodeGen/X86/x86-fold-pshufb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-fold-pshufb.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-fold-pshufb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-fold-pshufb.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define <2 x i64> @fold_pshufb() {
 ; CHECK-LABEL: fold_pshufb:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0]
 ; CHECK-NEXT:    retq
 entry:
@@ -23,7 +23,7 @@ entry:
 
 define <2 x i64> @pr24562() {
 ; CHECK-LABEL: pr24562:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:

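The CHECK-line churn in the hunks above is uniform: every old-style "BB#N" basic-block reference becomes "%bb.N", in both the "# BB#0" and "## BB#0" comment styles. As a sketch of how mechanical that rewrite is, here is a small Python helper (hypothetical, for illustration only; this mail does not show the actual tooling used to regenerate the tests):

    import re
    import pathlib

    def update_checks(path: str) -> None:
        """Rewrite old-style 'BB#N' block references to '%bb.N'.

        Hypothetical helper: it matches only the 'BB#N' token itself,
        so both the '# BB#0' and '## BB#0' comment styles are covered.
        """
        p = pathlib.Path(path)
        p.write_text(re.sub(r"BB#(\d+)", r"%bb.\1", p.read_text()))

    # Example usage: update every .ll test under one CodeGen directory.
    for ll in pathlib.Path("llvm/test/CodeGen/X86").rglob("*.ll"):
        update_checks(str(ll))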


