[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 4 09:18:56 PST 2017
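
This change makes the AsmPrinter's basic-block comments use the same spelling as MIR: machine basic blocks are now printed as "%bb.<number>" instead of "BB#<number>". Every hunk below is the same mechanical substitution in FileCheck lines, e.g. (the function name here is illustrative, not from the diff):

  ; CHECK-LABEL: foo:
  ; Before r319665 the block comment was matched as:
  ;   CHECK: # BB#0: # %entry
  ; After, MIR and debug output agree on:
  ;   CHECK: # %bb.0: # %entry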


Modified: llvm/trunk/test/CodeGen/X86/chain_order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/chain_order.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/chain_order.ll (original)
+++ llvm/trunk/test/CodeGen/X86/chain_order.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; A test from pifft (after SLP-vectorization) that fails when we drop the chain on newly merged loads.
 define void @cftx020(double* nocapture %a) {
 ; CHECK-LABEL: cftx020:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]

Modified: llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll Mon Dec  4 09:18:51 2017
@@ -10,24 +10,24 @@
 
 define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper2xi64a:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper2xi64a:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm1, %xmm1
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper2xi64a:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper2xi64a:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX2-NEXT:    retq
@@ -44,21 +44,21 @@ define <2 x i64> @_clearupper2xi64a(<2 x
 
 define <4 x i64> @_clearupper4xi64a(<4 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper4xi64a:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [4294967295,4294967295]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper4xi64a:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm2, %xmm2
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper4xi64a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX-NEXT:    retq
@@ -83,18 +83,18 @@ define <4 x i64> @_clearupper4xi64a(<4 x
 
 define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
 ; SSE2-LABEL: _clearupper4xi32a:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper4xi32a:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm1, %xmm1
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper4xi32a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX-NEXT:    retq
@@ -119,26 +119,26 @@ define <4 x i32> @_clearupper4xi32a(<4 x
 
 define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
 ; SSE2-LABEL: _clearupper8xi32a:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [65535,65535,65535,65535]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper8xi32a:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm2, %xmm2
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper8xi32a:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper8xi32a:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
 ; AVX2-NEXT:    retq
@@ -179,12 +179,12 @@ define <8 x i32> @_clearupper8xi32a(<8 x
 
 define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
 ; SSE-LABEL: _clearupper8xi16a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper8xi16a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x0 = extractelement <8 x i16> %0, i32 0
@@ -224,14 +224,14 @@ define <8 x i16> @_clearupper8xi16a(<8 x
 
 define <16 x i16> @_clearupper16xi16a(<16 x i16>) nounwind {
 ; SSE-LABEL: _clearupper16xi16a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    andps %xmm2, %xmm0
 ; SSE-NEXT:    andps %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper16xi16a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %x0  = extractelement <16 x i16> %0, i32 0
@@ -303,7 +303,7 @@ define <16 x i16> @_clearupper16xi16a(<1
 
 define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
 ; SSE2-LABEL: _clearupper16xi8a:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    movd %eax, %xmm0
@@ -352,12 +352,12 @@ define <16 x i8> @_clearupper16xi8a(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper16xi8a:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper16xi8a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x0  = extractelement <16 x i8> %0, i32 0
@@ -429,7 +429,7 @@ define <16 x i8> @_clearupper16xi8a(<16
 
 define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
 ; SSE2-LABEL: _clearupper32xi8a:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
@@ -524,14 +524,14 @@ define <32 x i8> @_clearupper32xi8a(<32
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper32xi8a:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; SSE42-NEXT:    andps %xmm2, %xmm0
 ; SSE42-NEXT:    andps %xmm2, %xmm1
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper32xi8a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %x0  = extractelement <32 x i8> %0, i32 0
@@ -667,24 +667,24 @@ define <32 x i8> @_clearupper32xi8a(<32
 
 define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper2xi64b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper2xi64b:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm1, %xmm1
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper2xi64b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper2xi64b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX2-NEXT:    retq
@@ -697,21 +697,21 @@ define <2 x i64> @_clearupper2xi64b(<2 x
 
 define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper4xi64b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper4xi64b:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm2, %xmm2
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper4xi64b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX-NEXT:    retq
@@ -726,18 +726,18 @@ define <4 x i64> @_clearupper4xi64b(<4 x
 
 define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
 ; SSE2-LABEL: _clearupper4xi32b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper4xi32b:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm1, %xmm1
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper4xi32b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX-NEXT:    retq
@@ -752,26 +752,26 @@ define <4 x i32> @_clearupper4xi32b(<4 x
 
 define <8 x i32> @_clearupper8xi32b(<8 x i32>) nounwind {
 ; SSE2-LABEL: _clearupper8xi32b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper8xi32b:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm2, %xmm2
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper8xi32b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper8xi32b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
 ; AVX2-NEXT:    retq
@@ -790,12 +790,12 @@ define <8 x i32> @_clearupper8xi32b(<8 x
 
 define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
 ; SSE-LABEL: _clearupper8xi16b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper8xi16b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x8 = bitcast <8 x i16> %0 to <16 x i8>
@@ -813,14 +813,14 @@ define <8 x i16> @_clearupper8xi16b(<8 x
 
 define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
 ; SSE-LABEL: _clearupper16xi16b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    andps %xmm2, %xmm0
 ; SSE-NEXT:    andps %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper16xi16b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm2
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -850,7 +850,7 @@ define <16 x i16> @_clearupper16xi16b(<1
 
 define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
 ; SSE2-LABEL: _clearupper16xi8b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pushq %r14
 ; SSE2-NEXT:    pushq %rbx
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -926,7 +926,7 @@ define <16 x i8> @_clearupper16xi8b(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper16xi8b:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pushq %r14
 ; SSE42-NEXT:    pushq %rbx
 ; SSE42-NEXT:    movq %xmm0, %rcx
@@ -1001,7 +1001,7 @@ define <16 x i8> @_clearupper16xi8b(<16
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper16xi8b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushq %rbp
 ; AVX-NEXT:    pushq %r15
 ; AVX-NEXT:    pushq %r14
@@ -1103,7 +1103,7 @@ define <16 x i8> @_clearupper16xi8b(<16
 
 define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
 ; SSE2-LABEL: _clearupper32xi8b:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pushq %r14
 ; SSE2-NEXT:    pushq %rbx
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
@@ -1179,7 +1179,7 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper32xi8b:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pushq %r14
 ; SSE42-NEXT:    pushq %rbx
 ; SSE42-NEXT:    movq %xmm0, %rcx
@@ -1254,7 +1254,7 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper32xi8b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    pushq %rbp
 ; AVX1-NEXT:    pushq %r15
 ; AVX1-NEXT:    pushq %r14
@@ -1425,7 +1425,7 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper32xi8b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    pushq %rbp
 ; AVX2-NEXT:    pushq %r15
 ; AVX2-NEXT:    pushq %r14
@@ -1633,24 +1633,24 @@ define <32 x i8> @_clearupper32xi8b(<32
 
 define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper2xi64c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper2xi64c:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm1, %xmm1
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper2xi64c:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper2xi64c:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX2-NEXT:    retq
@@ -1660,21 +1660,21 @@ define <2 x i64> @_clearupper2xi64c(<2 x
 
 define <4 x i64> @_clearupper4xi64c(<4 x i64>) nounwind {
 ; SSE2-LABEL: _clearupper4xi64c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper4xi64c:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm2, %xmm2
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper4xi64c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX-NEXT:    retq
@@ -1684,18 +1684,18 @@ define <4 x i64> @_clearupper4xi64c(<4 x
 
 define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
 ; SSE2-LABEL: _clearupper4xi32c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper4xi32c:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm1, %xmm1
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper4xi32c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX-NEXT:    retq
@@ -1705,26 +1705,26 @@ define <4 x i32> @_clearupper4xi32c(<4 x
 
 define <8 x i32> @_clearupper8xi32c(<8 x i32>) nounwind {
 ; SSE2-LABEL: _clearupper8xi32c:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
 ; SSE2-NEXT:    andps %xmm2, %xmm0
 ; SSE2-NEXT:    andps %xmm2, %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: _clearupper8xi32c:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pxor %xmm2, %xmm2
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: _clearupper8xi32c:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper8xi32c:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
 ; AVX2-NEXT:    retq
@@ -1734,12 +1734,12 @@ define <8 x i32> @_clearupper8xi32c(<8 x
 
 define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
 ; SSE-LABEL: _clearupper8xi16c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper8xi16c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %r = and <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
@@ -1748,14 +1748,14 @@ define <8 x i16> @_clearupper8xi16c(<8 x
 
 define <16 x i16> @_clearupper16xi16c(<16 x i16>) nounwind {
 ; SSE-LABEL: _clearupper16xi16c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    andps %xmm2, %xmm0
 ; SSE-NEXT:    andps %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper16xi16c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %r = and <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
@@ -1764,12 +1764,12 @@ define <16 x i16> @_clearupper16xi16c(<1
 
 define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
 ; SSE-LABEL: _clearupper16xi8c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper16xi8c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
@@ -1778,14 +1778,14 @@ define <16 x i8> @_clearupper16xi8c(<16
 
 define <32 x i8> @_clearupper32xi8c(<32 x i8>) nounwind {
 ; SSE-LABEL: _clearupper32xi8c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; SSE-NEXT:    andps %xmm2, %xmm0
 ; SSE-NEXT:    andps %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: _clearupper32xi8c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %r = and <32 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0

Modified: llvm/trunk/test/CodeGen/X86/clflushopt-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clflushopt-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clflushopt-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clflushopt-schedule.ll Mon Dec  4 09:18:51 2017
@@ -7,27 +7,27 @@
 
 define void @clflushopt(i8* %p) nounwind {
 ; GENERIC-LABEL: clflushopt:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    clflushopt (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GLM-LABEL: clflushopt:
-; GLM:       # BB#0:
+; GLM:       # %bb.0:
 ; GLM-NEXT:    clflushopt (%rdi) # sched: [3:1.00]
 ; GLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SKYLAKE-LABEL: clflushopt:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    clflushopt (%rdi) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: clflushopt:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    clflushopt (%rdi) # sched: [2:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: clflushopt:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    clflushopt (%rdi) # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void @llvm.x86.clflushopt(i8* %p)

Modified: llvm/trunk/test/CodeGen/X86/clflushopt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clflushopt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clflushopt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clflushopt.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define void @clflushopt(i8* %p) nounwind {
 ; X86-LABEL: clflushopt:
-; X86:       ## BB#0:
+; X86:       ## %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    clflushopt (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: clflushopt:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    clflushopt (%rdi)
 ; X64-NEXT:    retq
   tail call void @llvm.x86.clflushopt(i8* %p)

Modified: llvm/trunk/test/CodeGen/X86/clwb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clwb.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clwb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clwb.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @clwb(i8* %p) nounwind {
 ; CHECK-LABEL: clwb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    clwb (%eax)
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/clz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clz.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clz.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clz.ll Mon Dec  4 09:18:51 2017
@@ -16,28 +16,28 @@ declare i64 @llvm.ctlz.i64(i64, i1)
 
 define i8 @cttz_i8(i8 %x)  {
 ; X32-LABEL: cttz_i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    bsfl %eax, %eax
 ; X32-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    bsfl %eax, %eax
 ; X64-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i8:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    tzcntl %eax, %eax
 ; X32-CLZ-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i8:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    movzbl %dil, %eax
 ; X64-CLZ-NEXT:    tzcntl %eax, %eax
 ; X64-CLZ-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
@@ -48,22 +48,22 @@ define i8 @cttz_i8(i8 %x)  {
 
 define i16 @cttz_i16(i16 %x)  {
 ; X32-LABEL: cttz_i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    bsfw {{[0-9]+}}(%esp), %ax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsfw %di, %ax
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i16:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    tzcntw {{[0-9]+}}(%esp), %ax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i16:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    tzcntw %di, %ax
 ; X64-CLZ-NEXT:    retq
   %tmp = call i16 @llvm.cttz.i16( i16 %x, i1 true )
@@ -72,22 +72,22 @@ define i16 @cttz_i16(i16 %x)  {
 
 define i32 @cttz_i32(i32 %x)  {
 ; X32-LABEL: cttz_i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    bsfl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsfl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i32:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    tzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i32:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    tzcntl %edi, %eax
 ; X64-CLZ-NEXT:    retq
   %tmp = call i32 @llvm.cttz.i32( i32 %x, i1 true )
@@ -96,11 +96,11 @@ define i32 @cttz_i32(i32 %x)  {
 
 define i64 @cttz_i64(i64 %x)  {
 ; X32-LABEL: cttz_i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testl %eax, %eax
 ; X32-NEXT:    jne .LBB3_1
-; X32-NEXT:  # BB#2:
+; X32-NEXT:  # %bb.2:
 ; X32-NEXT:    bsfl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl $32, %eax
 ; X32-NEXT:    xorl %edx, %edx
@@ -111,16 +111,16 @@ define i64 @cttz_i64(i64 %x)  {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsfq %rdi, %rax
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i64:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    testl %eax, %eax
 ; X32-CLZ-NEXT:    jne .LBB3_1
-; X32-CLZ-NEXT:  # BB#2:
+; X32-CLZ-NEXT:  # %bb.2:
 ; X32-CLZ-NEXT:    tzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    addl $32, %eax
 ; X32-CLZ-NEXT:    xorl %edx, %edx
@@ -131,7 +131,7 @@ define i64 @cttz_i64(i64 %x)  {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i64:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    tzcntq %rdi, %rax
 ; X64-CLZ-NEXT:    retq
   %tmp = call i64 @llvm.cttz.i64( i64 %x, i1 true )
@@ -140,7 +140,7 @@ define i64 @cttz_i64(i64 %x)  {
 
 define i8 @ctlz_i8(i8 %x) {
 ; X32-LABEL: ctlz_i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    bsrl %eax, %eax
 ; X32-NEXT:    xorl $7, %eax
@@ -148,7 +148,7 @@ define i8 @ctlz_i8(i8 %x) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    bsrl %eax, %eax
 ; X64-NEXT:    xorl $7, %eax
@@ -156,7 +156,7 @@ define i8 @ctlz_i8(i8 %x) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i8:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    lzcntl %eax, %eax
 ; X32-CLZ-NEXT:    addl $-24, %eax
@@ -164,7 +164,7 @@ define i8 @ctlz_i8(i8 %x) {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i8:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    movzbl %dil, %eax
 ; X64-CLZ-NEXT:    lzcntl %eax, %eax
 ; X64-CLZ-NEXT:    addl $-24, %eax
@@ -176,26 +176,26 @@ define i8 @ctlz_i8(i8 %x) {
 
 define i16 @ctlz_i16(i16 %x) {
 ; X32-LABEL: ctlz_i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    bsrw {{[0-9]+}}(%esp), %ax
 ; X32-NEXT:    xorl $15, %eax
 ; X32-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsrw %di, %ax
 ; X64-NEXT:    xorl $15, %eax
 ; X64-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i16:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    lzcntw {{[0-9]+}}(%esp), %ax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i16:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntw %di, %ax
 ; X64-CLZ-NEXT:    retq
   %tmp2 = call i16 @llvm.ctlz.i16( i16 %x, i1 true )
@@ -204,24 +204,24 @@ define i16 @ctlz_i16(i16 %x) {
 
 define i32 @ctlz_i32(i32 %x) {
 ; X32-LABEL: ctlz_i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    bsrl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl $31, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i32:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    lzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i32:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntl %edi, %eax
 ; X64-CLZ-NEXT:    retq
   %tmp = call i32 @llvm.ctlz.i32( i32 %x, i1 true )
@@ -230,11 +230,11 @@ define i32 @ctlz_i32(i32 %x) {
 
 define i64 @ctlz_i64(i64 %x) {
 ; X32-LABEL: ctlz_i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testl %eax, %eax
 ; X32-NEXT:    jne .LBB7_1
-; X32-NEXT:  # BB#2:
+; X32-NEXT:  # %bb.2:
 ; X32-NEXT:    bsrl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl $31, %eax
 ; X32-NEXT:    addl $32, %eax
@@ -247,17 +247,17 @@ define i64 @ctlz_i64(i64 %x) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsrq %rdi, %rax
 ; X64-NEXT:    xorq $63, %rax
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i64:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    testl %eax, %eax
 ; X32-CLZ-NEXT:    jne .LBB7_1
-; X32-CLZ-NEXT:  # BB#2:
+; X32-CLZ-NEXT:  # %bb.2:
 ; X32-CLZ-NEXT:    lzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    addl $32, %eax
 ; X32-CLZ-NEXT:    xorl %edx, %edx
@@ -268,7 +268,7 @@ define i64 @ctlz_i64(i64 %x) {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i64:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntq %rdi, %rax
 ; X64-CLZ-NEXT:    retq
   %tmp = call i64 @llvm.ctlz.i64( i64 %x, i1 true )
@@ -278,11 +278,11 @@ define i64 @ctlz_i64(i64 %x) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i8 @ctlz_i8_zero_test(i8 %n) {
 ; X32-LABEL: ctlz_i8_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    testb %al, %al
 ; X32-NEXT:    je .LBB8_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    movzbl %al, %eax
 ; X32-NEXT:    bsrl %eax, %eax
 ; X32-NEXT:    xorl $7, %eax
@@ -294,10 +294,10 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i8_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testb %dil, %dil
 ; X64-NEXT:    je .LBB8_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    bsrl %eax, %eax
 ; X64-NEXT:    xorl $7, %eax
@@ -309,7 +309,7 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i8_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    lzcntl %eax, %eax
 ; X32-CLZ-NEXT:    addl $-24, %eax
@@ -317,7 +317,7 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i8_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    movzbl %dil, %eax
 ; X64-CLZ-NEXT:    lzcntl %eax, %eax
 ; X64-CLZ-NEXT:    addl $-24, %eax
@@ -330,11 +330,11 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i16 @ctlz_i16_zero_test(i16 %n) {
 ; X32-LABEL: ctlz_i16_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testw %ax, %ax
 ; X32-NEXT:    je .LBB9_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    bsrw %ax, %ax
 ; X32-NEXT:    xorl $15, %eax
 ; X32-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -345,10 +345,10 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i16_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testw %di, %di
 ; X64-NEXT:    je .LBB9_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsrw %di, %ax
 ; X64-NEXT:    xorl $15, %eax
 ; X64-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -359,12 +359,12 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i16_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    lzcntw {{[0-9]+}}(%esp), %ax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i16_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntw %di, %ax
 ; X64-CLZ-NEXT:    retq
   %tmp1 = call i16 @llvm.ctlz.i16(i16 %n, i1 false)
@@ -374,11 +374,11 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i32 @ctlz_i32_zero_test(i32 %n) {
 ; X32-LABEL: ctlz_i32_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testl %eax, %eax
 ; X32-NEXT:    je .LBB10_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    bsrl %eax, %eax
 ; X32-NEXT:    xorl $31, %eax
 ; X32-NEXT:    retl
@@ -387,10 +387,10 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i32_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    je .LBB10_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
@@ -399,12 +399,12 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i32_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    lzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i32_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntl %edi, %eax
 ; X64-CLZ-NEXT:    retq
   %tmp1 = call i32 @llvm.ctlz.i32(i32 %n, i1 false)
@@ -414,17 +414,17 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i64 @ctlz_i64_zero_test(i64 %n) {
 ; X32-LABEL: ctlz_i64_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    bsrl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    movl $63, %eax
 ; X32-NEXT:    je .LBB11_2
-; X32-NEXT:  # BB#1:
+; X32-NEXT:  # %bb.1:
 ; X32-NEXT:    movl %edx, %eax
 ; X32-NEXT:  .LBB11_2:
 ; X32-NEXT:    testl %ecx, %ecx
 ; X32-NEXT:    jne .LBB11_3
-; X32-NEXT:  # BB#4:
+; X32-NEXT:  # %bb.4:
 ; X32-NEXT:    xorl $31, %eax
 ; X32-NEXT:    addl $32, %eax
 ; X32-NEXT:    xorl %edx, %edx
@@ -436,10 +436,10 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i64_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testq %rdi, %rdi
 ; X64-NEXT:    je .LBB11_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsrq %rdi, %rax
 ; X64-NEXT:    xorq $63, %rax
 ; X64-NEXT:    retq
@@ -448,11 +448,11 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i64_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    testl %eax, %eax
 ; X32-CLZ-NEXT:    jne .LBB11_1
-; X32-CLZ-NEXT:  # BB#2:
+; X32-CLZ-NEXT:  # %bb.2:
 ; X32-CLZ-NEXT:    lzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    addl $32, %eax
 ; X32-CLZ-NEXT:    xorl %edx, %edx
@@ -463,7 +463,7 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i64_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntq %rdi, %rax
 ; X64-CLZ-NEXT:    retq
   %tmp1 = call i64 @llvm.ctlz.i64(i64 %n, i1 false)
@@ -473,11 +473,11 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i8 @cttz_i8_zero_test(i8 %n) {
 ; X32-LABEL: cttz_i8_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    testb %al, %al
 ; X32-NEXT:    je .LBB12_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    movzbl %al, %eax
 ; X32-NEXT:    bsfl %eax, %eax
 ; X32-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
@@ -488,10 +488,10 @@ define i8 @cttz_i8_zero_test(i8 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i8_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testb %dil, %dil
 ; X64-NEXT:    je .LBB12_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    bsfl %eax, %eax
 ; X64-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
@@ -502,7 +502,7 @@ define i8 @cttz_i8_zero_test(i8 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i8_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    orl $256, %eax # imm = 0x100
 ; X32-CLZ-NEXT:    tzcntl %eax, %eax
@@ -510,7 +510,7 @@ define i8 @cttz_i8_zero_test(i8 %n) {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i8_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    movzbl %dil, %eax
 ; X64-CLZ-NEXT:    orl $256, %eax # imm = 0x100
 ; X64-CLZ-NEXT:    tzcntl %eax, %eax
@@ -523,11 +523,11 @@ define i8 @cttz_i8_zero_test(i8 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i16 @cttz_i16_zero_test(i16 %n) {
 ; X32-LABEL: cttz_i16_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testw %ax, %ax
 ; X32-NEXT:    je .LBB13_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    bsfw %ax, %ax
 ; X32-NEXT:    retl
 ; X32-NEXT:  .LBB13_1
@@ -535,10 +535,10 @@ define i16 @cttz_i16_zero_test(i16 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i16_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testw %di, %di
 ; X64-NEXT:    je .LBB13_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsfw %di, %ax
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB13_1:
@@ -546,12 +546,12 @@ define i16 @cttz_i16_zero_test(i16 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i16_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    tzcntw {{[0-9]+}}(%esp), %ax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i16_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    tzcntw %di, %ax
 ; X64-CLZ-NEXT:    retq
   %tmp1 = call i16 @llvm.cttz.i16(i16 %n, i1 false)
@@ -561,11 +561,11 @@ define i16 @cttz_i16_zero_test(i16 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i32 @cttz_i32_zero_test(i32 %n) {
 ; X32-LABEL: cttz_i32_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testl %eax, %eax
 ; X32-NEXT:    je .LBB14_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    bsfl %eax, %eax
 ; X32-NEXT:    retl
 ; X32-NEXT:  .LBB14_1
@@ -573,10 +573,10 @@ define i32 @cttz_i32_zero_test(i32 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i32_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    je .LBB14_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsfl %edi, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB14_1:
@@ -584,12 +584,12 @@ define i32 @cttz_i32_zero_test(i32 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i32_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    tzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i32_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    tzcntl %edi, %eax
 ; X64-CLZ-NEXT:    retq
   %tmp1 = call i32 @llvm.cttz.i32(i32 %n, i1 false)
@@ -599,17 +599,17 @@ define i32 @cttz_i32_zero_test(i32 %n) {
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i64 @cttz_i64_zero_test(i64 %n) {
 ; X32-LABEL: cttz_i64_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    bsfl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    movl $32, %eax
 ; X32-NEXT:    je .LBB15_2
-; X32-NEXT:  # BB#1:
+; X32-NEXT:  # %bb.1:
 ; X32-NEXT:    movl %edx, %eax
 ; X32-NEXT:  .LBB15_2:
 ; X32-NEXT:    testl %ecx, %ecx
 ; X32-NEXT:    jne .LBB15_3
-; X32-NEXT:  # BB#4:
+; X32-NEXT:  # %bb.4:
 ; X32-NEXT:    addl $32, %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:    retl
@@ -619,10 +619,10 @@ define i64 @cttz_i64_zero_test(i64 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i64_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testq %rdi, %rdi
 ; X64-NEXT:    je .LBB15_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsfq %rdi, %rax
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB15_1:
@@ -630,11 +630,11 @@ define i64 @cttz_i64_zero_test(i64 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i64_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    testl %eax, %eax
 ; X32-CLZ-NEXT:    jne .LBB15_1
-; X32-CLZ-NEXT:  # BB#2:
+; X32-CLZ-NEXT:  # %bb.2:
 ; X32-CLZ-NEXT:    tzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    addl $32, %eax
 ; X32-CLZ-NEXT:    xorl %edx, %edx
@@ -645,7 +645,7 @@ define i64 @cttz_i64_zero_test(i64 %n) {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i64_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    tzcntq %rdi, %rax
 ; X64-CLZ-NEXT:    retq
   %tmp1 = call i64 @llvm.cttz.i64(i64 %n, i1 false)
@@ -659,11 +659,11 @@ define i64 @cttz_i64_zero_test(i64 %n) {
 ;        codegen doesn't know how to delete the movl and je.
 define i32 @ctlz_i32_fold_cmov(i32 %n) {
 ; X32-LABEL: ctlz_i32_fold_cmov:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    orl $1, %eax
 ; X32-NEXT:    je .LBB16_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    bsrl %eax, %eax
 ; X32-NEXT:    xorl $31, %eax
 ; X32-NEXT:    retl
@@ -672,10 +672,10 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i32_fold_cmov:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orl $1, %edi
 ; X64-NEXT:    je .LBB16_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
@@ -684,14 +684,14 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i32_fold_cmov:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    orl $1, %eax
 ; X32-CLZ-NEXT:    lzcntl %eax, %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i32_fold_cmov:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    orl $1, %edi
 ; X64-CLZ-NEXT:    lzcntl %edi, %eax
 ; X64-CLZ-NEXT:    retq
@@ -705,23 +705,23 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
 ; FIXME: We should probably select BSR instead of LZCNT in these circumstances.
 define i32 @ctlz_bsr(i32 %n) {
 ; X32-LABEL: ctlz_bsr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    bsrl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_bsr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_bsr:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    lzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    xorl $31, %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_bsr:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntl %edi, %eax
 ; X64-CLZ-NEXT:    xorl $31, %eax
 ; X64-CLZ-NEXT:    retq
@@ -735,11 +735,11 @@ define i32 @ctlz_bsr(i32 %n) {
 ;        codegen doesn't know how to combine the $32 and $31 into $63.
 define i32 @ctlz_bsr_zero_test(i32 %n) {
 ; X32-LABEL: ctlz_bsr_zero_test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testl %eax, %eax
 ; X32-NEXT:    je .LBB18_1
-; X32-NEXT:  # BB#2: # %cond.false
+; X32-NEXT:  # %bb.2: # %cond.false
 ; X32-NEXT:    bsrl %eax, %eax
 ; X32-NEXT:    xorl $31, %eax
 ; X32-NEXT:    xorl $31, %eax
@@ -750,10 +750,10 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_bsr_zero_test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    je .LBB18_1
-; X64-NEXT:  # BB#2: # %cond.false
+; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    xorl $31, %eax
@@ -764,13 +764,13 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_bsr_zero_test:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    lzcntl {{[0-9]+}}(%esp), %eax
 ; X32-CLZ-NEXT:    xorl $31, %eax
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_bsr_zero_test:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    lzcntl %edi, %eax
 ; X64-CLZ-NEXT:    xorl $31, %eax
 ; X64-CLZ-NEXT:    retq
@@ -781,7 +781,7 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
 
 define i8 @cttz_i8_knownbits(i8 %x)  {
 ; X32-LABEL: cttz_i8_knownbits:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    orb $2, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -790,7 +790,7 @@ define i8 @cttz_i8_knownbits(i8 %x)  {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i8_knownbits:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orb $2, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    bsfl %eax, %eax
@@ -798,7 +798,7 @@ define i8 @cttz_i8_knownbits(i8 %x)  {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: cttz_i8_knownbits:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-CLZ-NEXT:    orb $2, %al
 ; X32-CLZ-NEXT:    movzbl %al, %eax
@@ -807,7 +807,7 @@ define i8 @cttz_i8_knownbits(i8 %x)  {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: cttz_i8_knownbits:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    orb $2, %dil
 ; X64-CLZ-NEXT:    movzbl %dil, %eax
 ; X64-CLZ-NEXT:    tzcntl %eax, %eax
@@ -821,7 +821,7 @@ define i8 @cttz_i8_knownbits(i8 %x)  {
 
 define i8 @ctlz_i8_knownbits(i8 %x)  {
 ; X32-LABEL: ctlz_i8_knownbits:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    orb $64, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -831,7 +831,7 @@ define i8 @ctlz_i8_knownbits(i8 %x)  {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i8_knownbits:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orb $64, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    bsrl %eax, %eax
@@ -840,7 +840,7 @@ define i8 @ctlz_i8_knownbits(i8 %x)  {
 ; X64-NEXT:    retq
 ;
 ; X32-CLZ-LABEL: ctlz_i8_knownbits:
-; X32-CLZ:       # BB#0:
+; X32-CLZ:       # %bb.0:
 ; X32-CLZ-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-CLZ-NEXT:    orb $64, %al
 ; X32-CLZ-NEXT:    movzbl %al, %eax
@@ -850,7 +850,7 @@ define i8 @ctlz_i8_knownbits(i8 %x)  {
 ; X32-CLZ-NEXT:    retl
 ;
 ; X64-CLZ-LABEL: ctlz_i8_knownbits:
-; X64-CLZ:       # BB#0:
+; X64-CLZ:       # %bb.0:
 ; X64-CLZ-NEXT:    orb $64, %dil
 ; X64-CLZ-NEXT:    movzbl %dil, %eax
 ; X64-CLZ-NEXT:    lzcntl %eax, %eax
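
The "zero_test" functions above exist because the second operand of the ctlz/cttz intrinsics controls whether a zero input is defined. A minimal sketch of the two forms (standard intrinsic signature; the function names are illustrative):

  declare i32 @llvm.ctlz.i32(i32, i1)

  ; i1 true: the result is undefined for %x == 0, so plain BSR suffices.
  define i32 @ctlz_undef_at_zero(i32 %x) {
    %r = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
    ret i32 %r
  }

  ; i1 false: ctlz(0) must yield the bit width (32 here). Without LZCNT the
  ; backend guards BSR with the test-and-branch seen in the checks above.
  define i32 @ctlz_defined_at_zero(i32 %x) {
    %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
    ret i32 %r
  }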

Modified: llvm/trunk/test/CodeGen/X86/clzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clzero.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clzero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clzero.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define void @foo(i8* %p) #0 {
 ; X64-LABEL: foo:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    leaq (%rdi), %rax
 ; X64-NEXT:    clzero
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: foo:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    leal (%eax), %eax
 ; X32-NEXT:    clzero

Modified: llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; cmp with single-use load, should not form branch.
 define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y)  {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ucomisd (%rdi), %xmm0
 ; CHECK-NEXT:    cmovbel %edx, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -18,7 +18,7 @@ define i32 @test1(double %a, double* noc
 ; Sanity check: no load.
 define i32 @test2(double %a, double %b, i32 %x, i32 %y)  {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ucomisd %xmm1, %xmm0
 ; CHECK-NEXT:    cmovbel %esi, %edi
 ; CHECK-NEXT:    movl %edi, %eax
@@ -31,7 +31,7 @@ define i32 @test2(double %a, double %b,
 ; Multiple uses of the load.
 define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y)  {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl (%rsi), %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    cmovael %ecx, %edx
@@ -47,7 +47,7 @@ define i32 @test4(i32 %a, i32* nocapture
 ; Multiple uses of the cmp.
 define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl %edi, (%rsi)
 ; CHECK-NEXT:    cmoval %edi, %ecx
 ; CHECK-NEXT:    cmovael %edx, %ecx
@@ -64,7 +64,7 @@ define i32 @test5(i32 %a, i32* nocapture
 ; Zero-extended select.
 define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    cmovnsl (%rdx), %esi
@@ -82,7 +82,7 @@ entry:
 ; If a select is not obviously predictable, don't turn it into a branch.
 define i32 @weighted_select1(i32 %a, i32 %b) {
 ; CHECK-LABEL: weighted_select1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    cmovnel %edi, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -95,10 +95,10 @@ define i32 @weighted_select1(i32 %a, i32
 ; If a select is obviously predictable, turn it into a branch.
 define i32 @weighted_select2(i32 %a, i32 %b) {
 ; CHECK-LABEL: weighted_select2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jne .LBB6_2
-; CHECK-NEXT:  # BB#1: # %select.false
+; CHECK-NEXT:  # %bb.1: # %select.false
 ; CHECK-NEXT:    movl %esi, %edi
 ; CHECK-NEXT:  .LBB6_2: # %select.end
 ; CHECK-NEXT:    movl %edi, %eax
@@ -114,10 +114,10 @@ define i32 @weighted_select2(i32 %a, i32
 ; TODO: But likely true vs. likely false should affect basic block placement?
 define i32 @weighted_select3(i32 %a, i32 %b) {
 ; CHECK-LABEL: weighted_select3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je .LBB7_1
-; CHECK-NEXT:  # BB#2: # %select.end
+; CHECK-NEXT:  # %bb.2: # %select.end
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB7_1: # %select.false
@@ -132,7 +132,7 @@ define i32 @weighted_select3(i32 %a, i32
 ; Weightlessness is no reason to die.
 define i32 @unweighted_select(i32 %a, i32 %b) {
 ; CHECK-LABEL: unweighted_select:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    cmovnel %edi, %esi
 ; CHECK-NEXT:    movl %esi, %eax
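
The weighted_select tests above differ only in their branch-weight metadata, which is what the select-to-branch transform consults when deciding whether a select is predictable enough to expand. A minimal sketch of that setup (the weights are illustrative, not the actual thresholds the pass uses):

  define i32 @biased_select(i32 %a, i32 %b) {
    %cmp = icmp ne i32 %a, 0
    ; Heavily skewed weights mark the select as predictable, producing the
    ; explicit compare-and-branch pattern checked in weighted_select2.
    %sel = select i1 %cmp, i32 %a, i32 %b, !prof !0
    ret i32 %sel
  }

  !0 = !{!"branch_weights", i32 100, i32 1}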

Modified: llvm/trunk/test/CodeGen/X86/cmov-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmov-promotion.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmov-promotion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmov-promotion.ll Mon Dec  4 09:18:51 2017
@@ -4,11 +4,11 @@
 
 define i16 @cmov_zpromotion_8_to_16(i1 %c) {
 ; CMOV-LABEL: cmov_zpromotion_8_to_16:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movb $117, %al
 ; CMOV-NEXT:    jne .LBB0_2
-; CMOV-NEXT:  # BB#1:
+; CMOV-NEXT:  # %bb.1:
 ; CMOV-NEXT:    movb $-19, %al
 ; CMOV-NEXT:  .LBB0_2:
 ; CMOV-NEXT:    movzbl %al, %eax
@@ -16,11 +16,11 @@ define i16 @cmov_zpromotion_8_to_16(i1 %
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_8_to_16:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movb $117, %al
 ; NO_CMOV-NEXT:    jne .LBB0_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movb $-19, %al
 ; NO_CMOV-NEXT:  .LBB0_2:
 ; NO_CMOV-NEXT:    movzbl %al, %eax
@@ -33,22 +33,22 @@ define i16 @cmov_zpromotion_8_to_16(i1 %
 
 define i32 @cmov_zpromotion_8_to_32(i1 %c) {
 ; CMOV-LABEL: cmov_zpromotion_8_to_32:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movb $126, %al
 ; CMOV-NEXT:    jne .LBB1_2
-; CMOV-NEXT:  # BB#1:
+; CMOV-NEXT:  # %bb.1:
 ; CMOV-NEXT:    movb $-1, %al
 ; CMOV-NEXT:  .LBB1_2:
 ; CMOV-NEXT:    movzbl %al, %eax
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_8_to_32:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movb $126, %al
 ; NO_CMOV-NEXT:    jne .LBB1_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movb $-1, %al
 ; NO_CMOV-NEXT:  .LBB1_2:
 ; NO_CMOV-NEXT:    movzbl %al, %eax
@@ -60,22 +60,22 @@ define i32 @cmov_zpromotion_8_to_32(i1 %
 
 define i64 @cmov_zpromotion_8_to_64(i1 %c) {
 ; CMOV-LABEL: cmov_zpromotion_8_to_64:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movb $126, %al
 ; CMOV-NEXT:    jne .LBB2_2
-; CMOV-NEXT:  # BB#1:
+; CMOV-NEXT:  # %bb.1:
 ; CMOV-NEXT:    movb $-1, %al
 ; CMOV-NEXT:  .LBB2_2:
 ; CMOV-NEXT:    movzbl %al, %eax
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_8_to_64:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movb $126, %al
 ; NO_CMOV-NEXT:    jne .LBB2_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movb $-1, %al
 ; NO_CMOV-NEXT:  .LBB2_2:
 ; NO_CMOV-NEXT:    movzbl %al, %eax
@@ -88,7 +88,7 @@ define i64 @cmov_zpromotion_8_to_64(i1 %
 
 define i32 @cmov_zpromotion_16_to_32(i1 %c) {
 ; CMOV-LABEL: cmov_zpromotion_16_to_32:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movl $12414, %ecx # imm = 0x307E
 ; CMOV-NEXT:    movl $65535, %eax # imm = 0xFFFF
@@ -96,11 +96,11 @@ define i32 @cmov_zpromotion_16_to_32(i1
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_16_to_32:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; NO_CMOV-NEXT:    jne .LBB3_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; NO_CMOV-NEXT:  .LBB3_2:
 ; NO_CMOV-NEXT:    retl
@@ -111,7 +111,7 @@ define i32 @cmov_zpromotion_16_to_32(i1
 
 define i64 @cmov_zpromotion_16_to_64(i1 %c) {
 ; CMOV-LABEL: cmov_zpromotion_16_to_64:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movl $12414, %ecx # imm = 0x307E
 ; CMOV-NEXT:    movl $65535, %eax # imm = 0xFFFF
@@ -119,11 +119,11 @@ define i64 @cmov_zpromotion_16_to_64(i1
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_16_to_64:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; NO_CMOV-NEXT:    jne .LBB4_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; NO_CMOV-NEXT:  .LBB4_2:
 ; NO_CMOV-NEXT:    xorl %edx, %edx
@@ -135,7 +135,7 @@ define i64 @cmov_zpromotion_16_to_64(i1
 
 define i64 @cmov_zpromotion_32_to_64(i1 %c) {
 ; CMOV-LABEL: cmov_zpromotion_32_to_64:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movl $12414, %ecx # imm = 0x307E
 ; CMOV-NEXT:    movl $-1, %eax
@@ -143,11 +143,11 @@ define i64 @cmov_zpromotion_32_to_64(i1
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_zpromotion_32_to_64:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; NO_CMOV-NEXT:    jne .LBB5_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movl $-1, %eax
 ; NO_CMOV-NEXT:  .LBB5_2:
 ; NO_CMOV-NEXT:    xorl %edx, %edx
@@ -159,11 +159,11 @@ define i64 @cmov_zpromotion_32_to_64(i1
 
 define i16 @cmov_spromotion_8_to_16(i1 %c) {
 ; CMOV-LABEL: cmov_spromotion_8_to_16:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movb $117, %al
 ; CMOV-NEXT:    jne .LBB6_2
-; CMOV-NEXT:  # BB#1:
+; CMOV-NEXT:  # %bb.1:
 ; CMOV-NEXT:    movb $-19, %al
 ; CMOV-NEXT:  .LBB6_2:
 ; CMOV-NEXT:    movsbl %al, %eax
@@ -171,11 +171,11 @@ define i16 @cmov_spromotion_8_to_16(i1 %
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_8_to_16:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movb $117, %al
 ; NO_CMOV-NEXT:    jne .LBB6_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movb $-19, %al
 ; NO_CMOV-NEXT:  .LBB6_2:
 ; NO_CMOV-NEXT:    movsbl %al, %eax
@@ -188,22 +188,22 @@ define i16 @cmov_spromotion_8_to_16(i1 %
 
 define i32 @cmov_spromotion_8_to_32(i1 %c) {
 ; CMOV-LABEL: cmov_spromotion_8_to_32:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movb $126, %al
 ; CMOV-NEXT:    jne .LBB7_2
-; CMOV-NEXT:  # BB#1:
+; CMOV-NEXT:  # %bb.1:
 ; CMOV-NEXT:    movb $-1, %al
 ; CMOV-NEXT:  .LBB7_2:
 ; CMOV-NEXT:    movsbl %al, %eax
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_8_to_32:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movb $126, %al
 ; NO_CMOV-NEXT:    jne .LBB7_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movb $-1, %al
 ; NO_CMOV-NEXT:  .LBB7_2:
 ; NO_CMOV-NEXT:    movsbl %al, %eax
@@ -215,22 +215,22 @@ define i32 @cmov_spromotion_8_to_32(i1 %
 
 define i64 @cmov_spromotion_8_to_64(i1 %c) {
 ; CMOV-LABEL: cmov_spromotion_8_to_64:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movb $126, %al
 ; CMOV-NEXT:    jne .LBB8_2
-; CMOV-NEXT:  # BB#1:
+; CMOV-NEXT:  # %bb.1:
 ; CMOV-NEXT:    movb $-1, %al
 ; CMOV-NEXT:  .LBB8_2:
 ; CMOV-NEXT:    movsbq %al, %rax
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_8_to_64:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movb $126, %al
 ; NO_CMOV-NEXT:    jne .LBB8_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movb $-1, %al
 ; NO_CMOV-NEXT:  .LBB8_2:
 ; NO_CMOV-NEXT:    movsbl %al, %eax
@@ -244,7 +244,7 @@ define i64 @cmov_spromotion_8_to_64(i1 %
 
 define i32 @cmov_spromotion_16_to_32(i1 %c) {
 ; CMOV-LABEL: cmov_spromotion_16_to_32:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movl $12414, %ecx # imm = 0x307E
 ; CMOV-NEXT:    movl $-1, %eax
@@ -252,11 +252,11 @@ define i32 @cmov_spromotion_16_to_32(i1
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_16_to_32:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; NO_CMOV-NEXT:    jne .LBB9_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movl $-1, %eax
 ; NO_CMOV-NEXT:  .LBB9_2:
 ; NO_CMOV-NEXT:    retl
@@ -267,7 +267,7 @@ define i32 @cmov_spromotion_16_to_32(i1
 
 define i64 @cmov_spromotion_16_to_64(i1 %c) {
 ; CMOV-LABEL: cmov_spromotion_16_to_64:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movl $12414, %ecx # imm = 0x307E
 ; CMOV-NEXT:    movq $-1, %rax
@@ -275,11 +275,11 @@ define i64 @cmov_spromotion_16_to_64(i1
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_16_to_64:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; NO_CMOV-NEXT:    jne .LBB10_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movl $-1, %eax
 ; NO_CMOV-NEXT:  .LBB10_2:
 ; NO_CMOV-NEXT:    movl %eax, %edx
@@ -292,7 +292,7 @@ define i64 @cmov_spromotion_16_to_64(i1
 
 define i64 @cmov_spromotion_32_to_64(i1 %c) {
 ; CMOV-LABEL: cmov_spromotion_32_to_64:
-; CMOV:       # BB#0:
+; CMOV:       # %bb.0:
 ; CMOV-NEXT:    testb $1, %dil
 ; CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; CMOV-NEXT:    movl $-1, %ecx
@@ -301,11 +301,11 @@ define i64 @cmov_spromotion_32_to_64(i1
 ; CMOV-NEXT:    retq
 ;
 ; NO_CMOV-LABEL: cmov_spromotion_32_to_64:
-; NO_CMOV:       # BB#0:
+; NO_CMOV:       # %bb.0:
 ; NO_CMOV-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; NO_CMOV-NEXT:    movl $12414, %eax # imm = 0x307E
 ; NO_CMOV-NEXT:    jne .LBB11_2
-; NO_CMOV-NEXT:  # BB#1:
+; NO_CMOV-NEXT:  # %bb.1:
 ; NO_CMOV-NEXT:    movl $-1, %eax
 ; NO_CMOV-NEXT:  .LBB11_2:
 ; NO_CMOV-NEXT:    movl %eax, %edx

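All of the promotion tests above share one shape: an i1 selects between two
small constants and the result is widened, with the _zpromotion flavors using
zext (the movzbl in the checks) and the _spromotion flavors using sext
(movsbl). A hypothetical reduced input for the 8-to-32 zero-extending case:

  define i32 @cmov_zpromotion_8_to_32_sketch(i1 %c) {
    %sel = select i1 %c, i8 126, i8 -1   ; selected at byte width
    %ext = zext i8 %sel to i32           ; then zero-promoted
    ret i32 %ext
  }
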
Modified: llvm/trunk/test/CodeGen/X86/cmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmov.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmov.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8
 
 define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    btl %esi, %edi
 ; CHECK-NEXT:    movl $12, %eax
 ; CHECK-NEXT:    cmovael (%rcx), %eax
@@ -20,7 +20,7 @@ entry:
 
 define i32 @test2(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    btl %esi, %edi
 ; CHECK-NEXT:    movl $12, %eax
 ; CHECK-NEXT:    cmovbl (%rcx), %eax
@@ -43,7 +43,7 @@ declare void @bar(i64) nounwind
 
 define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    testb $1, %dl
 ; CHECK-NEXT:    cmovel %esi, %edi
@@ -77,7 +77,7 @@ define void @test3(i64 %a, i64 %b, i1 %p
 
 define i1 @test4() nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movsbl {{.*}}(%rip), %edx
 ; CHECK-NEXT:    movl %edx, %eax
 ; CHECK-NEXT:    shrb $7, %al
@@ -88,7 +88,7 @@ define i1 @test4() nounwind {
 ; CHECK-NEXT:    movb {{.*}}(%rip), %al
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je .LBB3_2
-; CHECK-NEXT:  # BB#1: # %bb.i.i.i
+; CHECK-NEXT:  # %bb.1: # %bb.i.i.i
 ; CHECK-NEXT:    movb {{.*}}(%rip), %cl
 ; CHECK-NEXT:  .LBB3_2: # %func_4.exit.i
 ; CHECK-NEXT:    pushq %rbx
@@ -96,15 +96,15 @@ define i1 @test4() nounwind {
 ; CHECK-NEXT:    setne %bl
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    je .LBB3_4
-; CHECK-NEXT:  # BB#3: # %func_4.exit.i
+; CHECK-NEXT:  # %bb.3: # %func_4.exit.i
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:  .LBB3_4: # %func_4.exit.i
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je .LBB3_7
-; CHECK-NEXT:  # BB#5: # %func_4.exit.i
+; CHECK-NEXT:  # %bb.5: # %func_4.exit.i
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    jne .LBB3_7
-; CHECK-NEXT:  # BB#6: # %bb.i.i
+; CHECK-NEXT:  # %bb.6: # %bb.i.i
 ; CHECK-NEXT:    movb {{.*}}(%rip), %cl
 ; CHECK-NEXT:    xorl %ebx, %ebx
 ; CHECK-NEXT:    movl %eax, %ecx
@@ -160,7 +160,7 @@ declare i32 @printf(i8* nocapture, ...)
 ; rdar://6668608
 define i32 @test5(i32* nocapture %P) nounwind readonly {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $41, (%rdi)
 ; CHECK-NEXT:    setg %al
@@ -175,7 +175,7 @@ entry:
 
 define i32 @test6(i32* nocapture %P) nounwind readonly {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $42, (%rdi)
 ; CHECK-NEXT:    setl %al
@@ -193,10 +193,10 @@ entry:
 ; because it isn't worth it. Just use a branch instead.
 define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    jne .LBB6_2
-; CHECK-NEXT:  # BB#1:
+; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movl %edx, %esi
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    movl %esi, %eax
@@ -207,7 +207,7 @@ define i8 @test7(i1 inreg %c, i8 inreg %
 
 define i32 @smin(i32 %x) {
 ; CHECK-LABEL: smin:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    notl %edi
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    movl $-1, %eax

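One note on test7 above: x86 cmov has no 8-bit register form, so an i8 select
would have to be promoted to a wider type and truncated back, and the test
records that this is not considered worthwhile; the checks instead expect a
plain test/jne diamond with full-width movl copies. A hypothetical reduced
form of such an input:

  define i8 @select_i8_sketch(i1 %c, i8 %a, i8 %b) {
    %r = select i1 %c, i8 %a, i8 %b   ; no byte-wide cmov exists to lower this directly
    ret i8 %r
  }
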
Modified: llvm/trunk/test/CodeGen/X86/cmovcmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmovcmov.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmovcmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmovcmov.ll Mon Dec  4 09:18:51 2017
@@ -233,13 +233,13 @@ attributes #0 = { nounwind }
 ; was lowered to:
 ;
 ; The first two cmovs got expanded to:
-; BB#0:
-;   JL_1 BB#9
-; BB#7:
-;   JG_1 BB#9
-; BB#8:
-; BB#9:
-;   %12 = phi(%7, BB#8, %11, BB#0, %12, BB#7)
+; %bb.0:
+;   JL_1 %bb.9
+; %bb.7:
+;   JG_1 %bb.9
+; %bb.8:
+; %bb.9:
+;   %12 = phi(%7, %bb.8, %11, %bb.0, %12, %bb.7)
 ;   %13 = COPY %12
 ; Which was invalid as %12 is not the same value as %13
 

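For context on the expansion sketch quoted above: when the X86 backend expands
a group of CMOV pseudos it builds a single branch diamond and rewrites each
CMOV as a phi in the join block, and each phi must take a fresh value along
every incoming edge. The invalid pre-fix output fed %12 back into its own phi
on the %bb.7 edge, so the following COPY could observe a stale value. A
hypothetical IR input that produces back-to-back cmovs on one condition:

  define float @two_selects_sketch(i32 %x, float %a, float %b) {
    %c = icmp eq i32 %x, 0
    %s1 = select i1 %c, float %a, float %b   ; first CMOV pseudo
    %s2 = select i1 %c, float %b, float %a   ; second, expanded into the same diamond
    %r = fadd float %s1, %s2
    ret float %r
  }
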
Modified: llvm/trunk/test/CodeGen/X86/cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmp.ll Mon Dec  4 09:18:51 2017
@@ -5,11 +5,11 @@
 
 define i32 @test1(i32 %X, i32* %y) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpl $0, (%rsi) # encoding: [0x83,0x3e,0x00]
 ; CHECK-NEXT:    je .LBB0_2 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB0_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %cond_true
+; CHECK-NEXT:  # %bb.1: # %cond_true
 ; CHECK-NEXT:    movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
 ; CHECK-NEXT:  .LBB0_2: # %ReturnBlock
@@ -29,12 +29,12 @@ ReturnBlock:
 
 define i32 @test2(i32 %X, i32* %y) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl $536870911, (%rsi) # encoding: [0xf7,0x06,0xff,0xff,0xff,0x1f]
 ; CHECK-NEXT:    # imm = 0x1FFFFFFF
 ; CHECK-NEXT:    je .LBB1_2 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB1_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %cond_true
+; CHECK-NEXT:  # %bb.1: # %cond_true
 ; CHECK-NEXT:    movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
 ; CHECK-NEXT:  .LBB1_2: # %ReturnBlock
@@ -55,11 +55,11 @@ ReturnBlock:
 
 define i8 @test2b(i8 %X, i8* %y) nounwind {
 ; CHECK-LABEL: test2b:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $31, (%rsi) # encoding: [0xf6,0x06,0x1f]
 ; CHECK-NEXT:    je .LBB2_2 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB2_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %cond_true
+; CHECK-NEXT:  # %bb.1: # %cond_true
 ; CHECK-NEXT:    movb $1, %al # encoding: [0xb0,0x01]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
 ; CHECK-NEXT:  .LBB2_2: # %ReturnBlock
@@ -80,7 +80,7 @@ ReturnBlock:
 
 define i64 @test3(i64 %x) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -93,7 +93,7 @@ entry:
 
 define i64 @test4(i64 %x) nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
 ; CHECK-NEXT:    setle %al # encoding: [0x0f,0x9e,0xc0]
@@ -106,17 +106,17 @@ entry:
 
 define i32 @test5(double %A) nounwind {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    ja .LBB5_3 # encoding: [0x77,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    jb .LBB5_3 # encoding: [0x72,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %bb12
+; CHECK-NEXT:  # %bb.2: # %bb12
 ; CHECK-NEXT:    movl $32, %eax # encoding: [0xb8,0x20,0x00,0x00,0x00]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
 ; CHECK-NEXT:  .LBB5_3: # %bb8
@@ -142,11 +142,11 @@ declare i32 @foo(...)
 
 define i32 @test6() nounwind align 2 {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpq $0, -{{[0-9]+}}(%rsp) # encoding: [0x48,0x83,0x7c,0x24,0xf8,0x00]
 ; CHECK-NEXT:    je .LBB6_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB6_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %F
+; CHECK-NEXT:  # %bb.2: # %F
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
 ; CHECK-NEXT:  .LBB6_1: # %T
@@ -168,7 +168,7 @@ F:
 
 define i32 @test7(i64 %res) nounwind {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    shrq $32, %rdi # encoding: [0x48,0xc1,0xef,0x20]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -181,7 +181,7 @@ entry:
 
 define i32 @test8(i64 %res) nounwind {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    shrq $32, %rdi # encoding: [0x48,0xc1,0xef,0x20]
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    cmpq $3, %rdi # encoding: [0x48,0x83,0xff,0x03]
@@ -195,7 +195,7 @@ entry:
 
 define i32 @test9(i64 %res) nounwind {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    shrq $33, %rdi # encoding: [0x48,0xc1,0xef,0x21]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -208,7 +208,7 @@ entry:
 
 define i32 @test10(i64 %res) nounwind {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    shrq $32, %rdi # encoding: [0x48,0xc1,0xef,0x20]
 ; CHECK-NEXT:    setne %al # encoding: [0x0f,0x95,0xc0]
@@ -221,7 +221,7 @@ entry:
 
 define i32 @test11(i64 %l) nounwind {
 ; CHECK-LABEL: test11:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    shrq $47, %rdi # encoding: [0x48,0xc1,0xef,0x2f]
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    cmpq $1, %rdi # encoding: [0x48,0x83,0xff,0x01]
@@ -236,7 +236,7 @@ entry:
 
 define i32 @test12() ssp uwtable {
 ; CHECK-LABEL: test12:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rax # encoding: [0x50]
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    callq test12b # encoding: [0xe8,A,A,A,A]
@@ -244,7 +244,7 @@ define i32 @test12() ssp uwtable {
 ; CHECK-NEXT:    testb %al, %al # encoding: [0x84,0xc0]
 ; CHECK-NEXT:    je .LBB12_2 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB12_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %T
+; CHECK-NEXT:  # %bb.1: # %T
 ; CHECK-NEXT:    movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
 ; CHECK-NEXT:    popq %rcx # encoding: [0x59]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
@@ -267,7 +267,7 @@ declare zeroext i1 @test12b()
 
 define i32 @test13(i32 %mask, i32 %base, i32 %intra) {
 ; CHECK-LABEL: test13:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $8, %dil # encoding: [0x40,0xf6,0xc7,0x08]
 ; CHECK-NEXT:    cmovnel %edx, %esi # encoding: [0x0f,0x45,0xf2]
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
@@ -282,7 +282,7 @@ entry:
 
 define i32 @test14(i32 %mask, i32 %base, i32 %intra) {
 ; CHECK-LABEL: test14:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    shrl $7, %edi # encoding: [0xc1,0xef,0x07]
 ; CHECK-NEXT:    cmovnsl %edx, %esi # encoding: [0x0f,0x49,0xf2]
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
@@ -298,7 +298,7 @@ entry:
 ; PR19964
 define zeroext i1 @test15(i32 %bf.load, i32 %n) {
 ; CHECK-LABEL: test15:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    shrl $16, %edi # encoding: [0xc1,0xef,0x10]
 ; CHECK-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
 ; CHECK-NEXT:    cmpl %esi, %edi # encoding: [0x39,0xf7]
@@ -316,7 +316,7 @@ entry:
 
 define i8 @test16(i16 signext %L) {
 ; CHECK-LABEL: test16:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testw %di, %di # encoding: [0x66,0x85,0xff]
 ; CHECK-NEXT:    setns %al # encoding: [0x0f,0x99,0xc0]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
@@ -330,7 +330,7 @@ entry:
 
 define i8 @test17(i32 %L) {
 ; CHECK-LABEL: test17:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl %edi, %edi # encoding: [0x85,0xff]
 ; CHECK-NEXT:    setns %al # encoding: [0x0f,0x99,0xc0]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
@@ -344,7 +344,7 @@ entry:
 
 define i8 @test18(i64 %L) {
 ; CHECK-LABEL: test18:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
 ; CHECK-NEXT:    setns %al # encoding: [0x0f,0x99,0xc0]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
@@ -358,7 +358,7 @@ entry:
 
 define zeroext i1 @test19(i32 %L) {
 ; CHECK-LABEL: test19:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl %edi, %edi # encoding: [0x85,0xff]
 ; CHECK-NEXT:    setns %al # encoding: [0x0f,0x99,0xc0]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
@@ -373,7 +373,7 @@ entry:
 ; This test failed due to incorrect handling of "shift + icmp" sequence
 define void @test20(i32 %bf.load, i8 %x1, i8* %b_addr) {
 ; CHECK-LABEL: test20:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    testl $16777215, %edi # encoding: [0xf7,0xc7,0xff,0xff,0xff,0x00]
 ; CHECK-NEXT:    # imm = 0xFFFFFF
@@ -405,7 +405,7 @@ entry:
 
 define i32 @test21(i64 %val) {
 ; CHECK-LABEL: test21:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    shrq $41, %rdi # encoding: [0x48,0xc1,0xef,0x29]
 ; CHECK-NEXT:    setne %al # encoding: [0x0f,0x95,0xc0]
@@ -421,7 +421,7 @@ entry:
 ; AND-to-SHR transformation is enabled for eq/ne condition codes only.
 define i32 @test22(i64 %val) {
 ; CHECK-LABEL: test22:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
 entry:
@@ -434,7 +434,7 @@ entry:
 
 define i32 @test23(i64 %val) {
 ; CHECK-LABEL: test23:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    testq $-1048576, %rdi # encoding: [0x48,0xf7,0xc7,0x00,0x00,0xf0,0xff]
 ; CHECK-NEXT:    # imm = 0xFFF00000
@@ -450,7 +450,7 @@ entry:
 
 define i32 @test24(i64 %val) {
 ; CHECK-LABEL: test24:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    shlq $16, %rdi # encoding: [0x48,0xc1,0xe7,0x10]
 ; CHECK-NEXT:    setne %al # encoding: [0x0f,0x95,0xc0]

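A worked instance of the AND-to-SHR note above: with an eq/ne compare against
zero, only "are any of the masked bits set" matters, so a mask covering bits
41..63 is equivalent to a plain shift,

  (and %val, 0xFFFFFE0000000000) != 0  <=>  (shr %val, 41) != 0

which is why test21 checks for shrq $41 followed by setne rather than
materializing the 64-bit mask. A hypothetical reduced form of test21:

  define i32 @test21_sketch(i64 %val) {
    %and = and i64 %val, -2199023255552   ; 0xFFFFFE0000000000, bits 41..63
    %cmp = icmp ne i64 %and, 0
    %r = zext i1 %cmp to i32
    ret i32 %r
  }

For signed predicates the shifted and masked values are not interchangeable,
which is why the comment above restricts the transformation to eq/ne.
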
Modified: llvm/trunk/test/CodeGen/X86/coalesce_commute_movsd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalesce_commute_movsd.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalesce_commute_movsd.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalesce_commute_movsd.ll Mon Dec  4 09:18:51 2017
@@ -8,23 +8,23 @@
 
 define <2 x double> @insert_f64(double %a0, <2 x double> %a1) {
 ; SSE2-LABEL: insert_f64:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
 ; SSE2-NEXT:    movapd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_f64:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX512-NEXT:    retq
  %1 = insertelement <2 x double> %a1, double %a0, i32 0
@@ -33,23 +33,23 @@ define <2 x double> @insert_f64(double %
 
 define <4 x float> @insert_f32(float %a0, <4 x float> %a1) {
 ; SSE2-LABEL: insert_f32:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_f32:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX512-NEXT:    retq
  %1 = insertelement <4 x float> %a1, float %a0, i32 0

Modified: llvm/trunk/test/CodeGen/X86/combine-64bit-vec-binop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-64bit-vec-binop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-64bit-vec-binop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-64bit-vec-binop.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define double @test1_add(double %A, double %B) {
 ; SSE41-LABEL: test1_add:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    paddd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -15,7 +15,7 @@ define double @test1_add(double %A, doub
 
 define double @test2_add(double %A, double %B) {
 ; SSE41-LABEL: test2_add:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    paddw %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -27,7 +27,7 @@ define double @test2_add(double %A, doub
 
 define double @test3_add(double %A, double %B) {
 ; SSE41-LABEL: test3_add:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    paddb %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>
@@ -39,7 +39,7 @@ define double @test3_add(double %A, doub
 
 define double @test1_sub(double %A, double %B) {
 ; SSE41-LABEL: test1_sub:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psubd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -51,7 +51,7 @@ define double @test1_sub(double %A, doub
 
 define double @test2_sub(double %A, double %B) {
 ; SSE41-LABEL: test2_sub:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psubw %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -63,7 +63,7 @@ define double @test2_sub(double %A, doub
 
 define double @test3_sub(double %A, double %B) {
 ; SSE41-LABEL: test3_sub:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    psubb %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>
@@ -75,7 +75,7 @@ define double @test3_sub(double %A, doub
 
 define double @test1_mul(double %A, double %B) {
 ; SSE41-LABEL: test1_mul:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmulld %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -87,7 +87,7 @@ define double @test1_mul(double %A, doub
 
 define double @test2_mul(double %A, double %B) {
 ; SSE41-LABEL: test2_mul:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmullw %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -100,7 +100,7 @@ define double @test2_mul(double %A, doub
 ; There is no legal ISD::MUL with type MVT::v8i16.
 define double @test3_mul(double %A, double %B) {
 ; SSE41-LABEL: test3_mul:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; SSE41-NEXT:    pmullw %xmm2, %xmm0
@@ -115,7 +115,7 @@ define double @test3_mul(double %A, doub
 
 define double @test1_and(double %A, double %B) {
 ; SSE41-LABEL: test1_and:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    andps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -127,7 +127,7 @@ define double @test1_and(double %A, doub
 
 define double @test2_and(double %A, double %B) {
 ; SSE41-LABEL: test2_and:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    andps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -139,7 +139,7 @@ define double @test2_and(double %A, doub
 
 define double @test3_and(double %A, double %B) {
 ; SSE41-LABEL: test3_and:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    andps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>
@@ -151,7 +151,7 @@ define double @test3_and(double %A, doub
 
 define double @test1_or(double %A, double %B) {
 ; SSE41-LABEL: test1_or:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    orps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -163,7 +163,7 @@ define double @test1_or(double %A, doubl
 
 define double @test2_or(double %A, double %B) {
 ; SSE41-LABEL: test2_or:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    orps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -175,7 +175,7 @@ define double @test2_or(double %A, doubl
 
 define double @test3_or(double %A, double %B) {
 ; SSE41-LABEL: test3_or:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    orps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>
@@ -187,7 +187,7 @@ define double @test3_or(double %A, doubl
 
 define double @test1_xor(double %A, double %B) {
 ; SSE41-LABEL: test1_xor:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -199,7 +199,7 @@ define double @test1_xor(double %A, doub
 
 define double @test2_xor(double %A, double %B) {
 ; SSE41-LABEL: test2_xor:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -211,7 +211,7 @@ define double @test2_xor(double %A, doub
 
 define double @test3_xor(double %A, double %B) {
 ; SSE41-LABEL: test3_xor:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>
@@ -223,7 +223,7 @@ define double @test3_xor(double %A, doub
 
 define double @test_fadd(double %A, double %B) {
 ; SSE41-LABEL: test_fadd:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    addps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x float>
@@ -235,7 +235,7 @@ define double @test_fadd(double %A, doub
 
 define double @test_fsub(double %A, double %B) {
 ; SSE41-LABEL: test_fsub:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    subps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x float>
@@ -247,7 +247,7 @@ define double @test_fsub(double %A, doub
 
 define double @test_fmul(double %A, double %B) {
 ; SSE41-LABEL: test_fmul:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    mulps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
   %1 = bitcast double %A to <2 x float>

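The test3_mul checks above show the one case in this file that cannot stay at
its original width: x86 has no instruction that multiplies vectors of i8, so
the operands are widened with pmovzxbw, multiplied as words with pmullw, and
the low bytes are then narrowed back. A sketch of the shape of input under
test (hypothetical reduction):

  define double @mul_v8i8_sketch(double %A, double %B) {
    %1 = bitcast double %A to <8 x i8>
    %2 = bitcast double %B to <8 x i8>
    %3 = mul <8 x i8> %1, %2              ; no byte-wide vector multiply exists
    %4 = bitcast <8 x i8> %3 to double
    ret double %4
  }
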
Modified: llvm/trunk/test/CodeGen/X86/combine-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-abs.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-abs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-abs.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 ; fold (abs c1) -> c2
 define <4 x i32> @combine_v4i32_abs_constant() {
 ; CHECK-LABEL: combine_v4i32_abs_constant:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [0,1,3,2147483648]
 ; CHECK-NEXT:    retq
   %1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> <i32 0, i32 -1, i32 3, i32 -2147483648>)
@@ -15,7 +15,7 @@ define <4 x i32> @combine_v4i32_abs_cons
 
 define <16 x i16> @combine_v16i16_abs_constant() {
 ; CHECK-LABEL: combine_v16i16_abs_constant:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [0,1,1,3,3,7,7,255,255,4096,4096,32767,32767,32768,32768,0]
 ; CHECK-NEXT:    retq
   %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> <i16 0, i16 1, i16 -1, i16 3, i16 -3, i16 7, i16 -7, i16 255, i16 -255, i16 4096, i16 -4096, i16 32767, i16 -32767, i16 -32768, i16 32768, i16 65536>)
@@ -25,7 +25,7 @@ define <16 x i16> @combine_v16i16_abs_co
 ; fold (abs (abs x)) -> (abs x)
 define i32 @combine_i32_abs_abs(i32 %a) {
 ; CHECK-LABEL: combine_i32_abs_abs:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    negl %eax
 ; CHECK-NEXT:    cmovll %edi, %eax
@@ -41,7 +41,7 @@ define i32 @combine_i32_abs_abs(i32 %a)
 
 define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) {
 ; CHECK-LABEL: combine_v8i16_abs_abs:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpabsw %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a)
@@ -53,7 +53,7 @@ define <8 x i16> @combine_v8i16_abs_abs(
 
 define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
 ; CHECK-LABEL: combine_v32i8_abs_abs:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpabsb %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %n1 = sub <32 x i8> zeroinitializer, %a
@@ -65,7 +65,7 @@ define <32 x i8> @combine_v32i8_abs_abs(
 
 define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
 ; AVX2-LABEL: combine_v4i64_abs_abs:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
@@ -76,14 +76,14 @@ define <4 x i64> @combine_v4i64_abs_abs(
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: combine_v4i64_abs_abs:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpabsq %zmm0, %zmm0
 ; AVX512F-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: combine_v4i64_abs_abs:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpabsq %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
   %n1 = sub <4 x i64> zeroinitializer, %a
@@ -98,17 +98,17 @@ define <4 x i64> @combine_v4i64_abs_abs(
 ; fold (abs x) -> x iff not-negative
 define <16 x i8> @combine_v16i8_abs_constant(<16 x i8> %a) {
 ; AVX2-LABEL: combine_v16i8_abs_constant:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: combine_v16i8_abs_constant:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: combine_v16i8_abs_constant:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512VL-NEXT:    retq
   %1 = insertelement <16 x i8> undef, i8 15, i32 0
@@ -120,7 +120,7 @@ define <16 x i8> @combine_v16i8_abs_cons
 
 define <8 x i32> @combine_v8i32_abs_pos(<8 x i32> %a) {
 ; CHECK-LABEL: combine_v8i32_abs_pos:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrld $1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %1 = lshr <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>

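A worked instance of the "fold (abs c1) -> c2" case at the top of this file,
matching the vmovaps constant in combine_v4i32_abs_constant lane by lane:

  abs(0)           = 0
  abs(-1)          = 1
  abs(3)           = 3
  abs(-2147483648) = -2147483648 (wraps in i32) = 0x80000000 = 2147483648

The last lane is the interesting one: |INT32_MIN| overflows back to itself,
which is why the folded constant prints as the unsigned value 2147483648.
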
Modified: llvm/trunk/test/CodeGen/X86/combine-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-add.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-add.ll Mon Dec  4 09:18:51 2017
@@ -5,11 +5,11 @@
 ; fold (add x, 0) -> x
 define <4 x i32> @combine_vec_add_to_zero(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_add_to_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_to_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %a, zeroinitializer
   ret <4 x i32> %1
@@ -18,14 +18,14 @@ define <4 x i32> @combine_vec_add_to_zer
 ; fold ((c1-A)+c2) -> (c1+c2)-A
 define <4 x i32> @combine_vec_add_constant_sub(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_add_constant_sub:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,2,4,6]
 ; SSE-NEXT:    psubd %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_constant_sub:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,2,4,6]
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
@@ -37,13 +37,13 @@ define <4 x i32> @combine_vec_add_consta
 ; fold ((0-A) + B) -> B-A
 define <4 x i32> @combine_vec_add_neg0(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_add_neg0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_neg0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> zeroinitializer, %a
@@ -54,12 +54,12 @@ define <4 x i32> @combine_vec_add_neg0(<
 ; fold (A + (0-B)) -> A-B
 define <4 x i32> @combine_vec_add_neg1(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_add_neg1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_neg1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> zeroinitializer, %b
@@ -70,12 +70,12 @@ define <4 x i32> @combine_vec_add_neg1(<
 ; fold (A+(B-A)) -> B
 define <4 x i32> @combine_vec_add_sub0(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_add_sub0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %b, %a
@@ -86,12 +86,12 @@ define <4 x i32> @combine_vec_add_sub0(<
 ; fold ((B-A)+A) -> B
 define <4 x i32> @combine_vec_add_sub1(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_add_sub1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %b, %a
@@ -102,13 +102,13 @@ define <4 x i32> @combine_vec_add_sub1(<
 ; fold (A+(B-(A+C))) to (B-C)
 define <4 x i32> @combine_vec_add_sub_add0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_add_sub_add0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm2, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub_add0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm2, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %a, %c
@@ -120,13 +120,13 @@ define <4 x i32> @combine_vec_add_sub_ad
 ; fold (A+(B-(C+A))) to (B-C)
 define <4 x i32> @combine_vec_add_sub_add1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_add_sub_add1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm2, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub_add1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm2, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %c, %a
@@ -138,13 +138,13 @@ define <4 x i32> @combine_vec_add_sub_ad
 ; fold (A+((B-A)+C)) to (B+C)
 define <4 x i32> @combine_vec_add_sub_add2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_add_sub_add2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm2, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub_add2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %b, %a
@@ -156,13 +156,13 @@ define <4 x i32> @combine_vec_add_sub_ad
 ; fold (A+((B-A)-C)) to (B-C)
 define <4 x i32> @combine_vec_add_sub_add3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_add_sub_add3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm2, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub_add3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm2, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %b, %a
@@ -174,14 +174,14 @@ define <4 x i32> @combine_vec_add_sub_ad
 ; fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
 define <4 x i32> @combine_vec_add_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %d) {
 ; SSE-LABEL: combine_vec_add_sub_sub:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    paddd %xmm2, %xmm1
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sub_sub:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
@@ -195,14 +195,14 @@ define <4 x i32> @combine_vec_add_sub_su
 ; fold (a+b) -> (a|b) iff a and b share no bits.
 define <4 x i32> @combine_vec_add_uniquebits(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_add_uniquebits:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_uniquebits:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
@@ -218,13 +218,13 @@ define <4 x i32> @combine_vec_add_unique
 ; fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
 define <4 x i32> @combine_vec_add_shl_neg0(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_add_shl_neg0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $5, %xmm1
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_shl_neg0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $5, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -237,13 +237,13 @@ define <4 x i32> @combine_vec_add_shl_ne
 ; fold (add shl(0 - y, n), x) -> sub(x, shl(y, n))
 define <4 x i32> @combine_vec_add_shl_neg1(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_add_shl_neg1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $5, %xmm1
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_shl_neg1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $5, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -257,13 +257,13 @@ define <4 x i32> @combine_vec_add_shl_ne
 ; and similar xforms where the inner op is either ~0 or 0.
 define <4 x i32> @combine_vec_add_and_compare(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
 ; SSE-LABEL: combine_vec_add_and_compare:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm2, %xmm1
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_and_compare:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -277,14 +277,14 @@ define <4 x i32> @combine_vec_add_and_co
 ; add (sext i1), X -> sub X, (zext i1)
 define <4 x i32> @combine_vec_add_sext(<4 x i1> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: combine_vec_add_sext:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sext:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -297,14 +297,14 @@ define <4 x i32> @combine_vec_add_sext(<
 ; add (sext i1), X -> sub X, (zext i1)
 define <4 x i32> @combine_vec_add_sextinreg(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: combine_vec_add_sextinreg:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sextinreg:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0

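A short justification for the sext folds that close this file
(combine_vec_add_sext and combine_vec_add_sextinreg): for an i1 value b,
sext(b) is 0 or -1 while zext(b) is 0 or 1, so

  X + sext(b) = X - zext(b)

holds for both values of b, which is the identity the combine relies on.
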
Modified: llvm/trunk/test/CodeGen/X86/combine-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-and.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-and.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @and_self(i32 %x) {
 ; CHECK-LABEL: and_self:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %and = and i32 %x, %x
@@ -12,7 +12,7 @@ define i32 @and_self(i32 %x) {
 
 define <4 x i32> @and_self_vec(<4 x i32> %x) {
 ; CHECK-LABEL: and_self_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %and = and <4 x i32> %x, %x
   ret <4 x i32> %and
@@ -26,7 +26,7 @@ define <4 x i32> @and_self_vec(<4 x i32>
 
 define <4 x i32> @test1(<4 x i32> %A) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -36,7 +36,7 @@ define <4 x i32> @test1(<4 x i32> %A) {
 
 define <4 x i32> @test2(<4 x i32> %A) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -46,7 +46,7 @@ define <4 x i32> @test2(<4 x i32> %A) {
 
 define <4 x i32> @test3(<4 x i32> %A) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
 ; CHECK-NEXT:    retq
@@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %A) {
 
 define <4 x i32> @test4(<4 x i32> %A) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; CHECK-NEXT:    retq
@@ -66,7 +66,7 @@ define <4 x i32> @test4(<4 x i32> %A) {
 
 define <4 x i32> @test5(<4 x i32> %A) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; CHECK-NEXT:    retq
@@ -76,7 +76,7 @@ define <4 x i32> @test5(<4 x i32> %A) {
 
 define <4 x i32> @test6(<4 x i32> %A) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-NEXT:    retq
@@ -86,7 +86,7 @@ define <4 x i32> @test6(<4 x i32> %A) {
 
 define <4 x i32> @test7(<4 x i32> %A) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -96,7 +96,7 @@ define <4 x i32> @test7(<4 x i32> %A) {
 
 define <4 x i32> @test8(<4 x i32> %A) {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
 ; CHECK-NEXT:    retq
@@ -106,7 +106,7 @@ define <4 x i32> @test8(<4 x i32> %A) {
 
 define <4 x i32> @test9(<4 x i32> %A) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
@@ -115,7 +115,7 @@ define <4 x i32> @test9(<4 x i32> %A) {
 
 define <4 x i32> @test10(<4 x i32> %A) {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
 ; CHECK-NEXT:    retq
@@ -125,7 +125,7 @@ define <4 x i32> @test10(<4 x i32> %A) {
 
 define <4 x i32> @test11(<4 x i32> %A) {
 ; CHECK-LABEL: test11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -135,7 +135,7 @@ define <4 x i32> @test11(<4 x i32> %A) {
 
 define <4 x i32> @test12(<4 x i32> %A) {
 ; CHECK-LABEL: test12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
 ; CHECK-NEXT:    retq
@@ -145,7 +145,7 @@ define <4 x i32> @test12(<4 x i32> %A) {
 
 define <4 x i32> @test13(<4 x i32> %A) {
 ; CHECK-LABEL: test13:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-NEXT:    retq
@@ -155,7 +155,7 @@ define <4 x i32> @test13(<4 x i32> %A) {
 
 define <4 x i32> @test14(<4 x i32> %A) {
 ; CHECK-LABEL: test14:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -165,7 +165,7 @@ define <4 x i32> @test14(<4 x i32> %A) {
 
 define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test15:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
@@ -176,7 +176,7 @@ define <4 x i32> @test15(<4 x i32> %A, <
 
 define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; CHECK-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
@@ -187,7 +187,7 @@ define <4 x i32> @test16(<4 x i32> %A, <
 
 define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test17:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
@@ -202,7 +202,7 @@ define <4 x i32> @test17(<4 x i32> %A, <
 
 define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
 ; CHECK-LABEL: and_or_v2i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [8,8]
 ; CHECK-NEXT:    retq
   %1 = or <2 x i64> %a0, <i64 255, i64 255>
@@ -212,7 +212,7 @@ define <2 x i64> @and_or_v2i64(<2 x i64>
 
 define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
 ; CHECK-LABEL: and_or_v4i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
 ; CHECK-NEXT:    retq
   %1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
@@ -226,7 +226,7 @@ define <4 x i32> @and_or_v4i32(<4 x i32>
 
 define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
 ; CHECK-LABEL: and_or_zext_v2i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %1 = zext <2 x i32> %a0 to <2 x i64>
@@ -237,7 +237,7 @@ define <2 x i64> @and_or_zext_v2i32(<2 x
 
 define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
 ; CHECK-LABEL: and_or_zext_v4i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %1 = zext <4 x i16> %a0 to <4 x i32>
@@ -252,7 +252,7 @@ define <4 x i32> @and_or_zext_v4i16(<4 x
 
 define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
 ; CHECK-LABEL: ashr_mask1_v8i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    psrlw $15, %xmm0
 ; CHECK-NEXT:    retq
   %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
@@ -262,7 +262,7 @@ define <8 x i16> @ashr_mask1_v8i16(<8 x
 
 define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
 ; CHECK-LABEL: ashr_mask7_v4i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    psrad $31, %xmm0
 ; CHECK-NEXT:    psrld $29, %xmm0
 ; CHECK-NEXT:    retq
@@ -278,7 +278,7 @@ define <4 x i32> @ashr_mask7_v4i32(<4 x
 ; PR34620 - redundant PAND after vector shift of a byte vector (PSRLW)
 define <16 x i8> @PR34620(<16 x i8> %a0, <16 x i8> %a1) {
 ; CHECK-LABEL: PR34620:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    psrlw $1, %xmm0
 ; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0

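Context for the PR34620 case above: SSE has no byte-granularity shifts, so a
<16 x i8> lshr-by-one is emitted as a word-sized psrlw $1 plus a pand that
clears the bit shifted across each byte boundary. An explicit mask in the IR,
as in this hypothetical reduction, then becomes the second, redundant pand the
PR tracks, since the shift already guarantees every byte's top bit is zero:

  define <16 x i8> @pr34620_sketch(<16 x i8> %a0) {
    %shr = lshr <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                                i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
    %and = and <16 x i8> %shr, <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127,
                                i8 127, i8 127, i8 127, i8 127, i8 127, i8 127,
                                i8 127, i8 127, i8 127, i8 127>
    ret <16 x i8> %and
  }
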
Modified: llvm/trunk/test/CodeGen/X86/combine-avx-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-avx-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-avx-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-avx-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_blend_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
   ret <4 x double> %1
@@ -12,7 +12,7 @@ define <4 x double> @test_x86_avx_blend_
 
 define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_blend_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
   ret <8 x float> %1
@@ -20,7 +20,7 @@ define <8 x float> @test_x86_avx_blend_p
 
 define <4 x double> @test2_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test2_x86_avx_blend_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
   ret <4 x double> %1
@@ -28,7 +28,7 @@ define <4 x double> @test2_x86_avx_blend
 
 define <8 x float> @test2_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test2_x86_avx_blend_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
   ret <8 x float> %1
@@ -36,7 +36,7 @@ define <8 x float> @test2_x86_avx_blend_
 
 define <4 x double> @test3_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test3_x86_avx_blend_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
@@ -45,7 +45,7 @@ define <4 x double> @test3_x86_avx_blend
 
 define <8 x float> @test3_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test3_x86_avx_blend_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)

Modified: llvm/trunk/test/CodeGen/X86/combine-avx2-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-avx2-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-avx2-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-avx2-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
 ; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a0, i32 7)
   ret <16 x i16> %res
@@ -14,7 +14,7 @@ define <16 x i16> @test_x86_avx2_pblendw
 
 define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a0, i32 7)
   ret <4 x i32> %res
@@ -22,7 +22,7 @@ define <4 x i32> @test_x86_avx2_pblendd_
 
 define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a0, i32 7)
   ret <8 x i32> %res
@@ -30,7 +30,7 @@ define <8 x i32> @test_x86_avx2_pblendd_
 
 define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test2_x86_avx2_pblendw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 0)
   ret <16 x i16> %res
@@ -38,7 +38,7 @@ define <16 x i16> @test2_x86_avx2_pblend
 
 define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test2_x86_avx2_pblendd_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 0)
   ret <4 x i32> %res
@@ -46,7 +46,7 @@ define <4 x i32> @test2_x86_avx2_pblendd
 
 define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test2_x86_avx2_pblendd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 0)
   ret <8 x i32> %res
@@ -54,7 +54,7 @@ define <8 x i32> @test2_x86_avx2_pblendd
 
 define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
 ; CHECK-LABEL: test3_x86_avx2_pblendw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 -1)
@@ -63,7 +63,7 @@ define <16 x i16> @test3_x86_avx2_pblend
 
 define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test3_x86_avx2_pblendd_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 -1)
@@ -72,7 +72,7 @@ define <4 x i32> @test3_x86_avx2_pblendd
 
 define <8 x i32> @test3_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test3_x86_avx2_pblendd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 -1)

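The two blend-intrinsic files above exercise the same three degenerate immediates: 0 selects every lane from the first source, -1 (all ones) selects every lane from the second (hence the lone vmovaps), and blending a register with itself is an identity. A sketch of the zero-immediate case, reusing the intrinsic signature from the tests (function name hypothetical):

declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i32)

define <8 x i32> @blend_imm_zero(<8 x i32> %a, <8 x i32> %b) {
  ; no lanes are taken from %b, so the call folds away and %a is returned as-is
  %r = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a, <8 x i32> %b, i32 0)
  ret <8 x i32> %r
}
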
Modified: llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 ; copysign(x, c1) -> fabs(x) iff ispos(c1)
 define <4 x float> @combine_vec_fcopysign_pos_constant0(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_pos_constant0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm1 = [2.000000e+00,2.000000e+00,2.000000e+00,2.000000e+00]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
@@ -17,7 +17,7 @@ define <4 x float> @combine_vec_fcopysig
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_pos_constant0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
@@ -31,7 +31,7 @@ define <4 x float> @combine_vec_fcopysig
 
 define <4 x float> @combine_vec_fcopysign_pos_constant1(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_pos_constant1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,4.000000e+00,8.000000e+00]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
@@ -39,7 +39,7 @@ define <4 x float> @combine_vec_fcopysig
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_pos_constant1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
@@ -52,12 +52,12 @@ define <4 x float> @combine_vec_fcopysig
 
 define <4 x float> @combine_vec_fcopysign_fabs_sgn(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fabs_sgn:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fabs_sgn:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -69,7 +69,7 @@ define <4 x float> @combine_vec_fcopysig
 ; copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
 define <4 x float> @combine_vec_fcopysign_neg_constant0(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_neg_constant0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm1 = [-2.000000e+00,-2.000000e+00,-2.000000e+00,-2.000000e+00]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
@@ -77,7 +77,7 @@ define <4 x float> @combine_vec_fcopysig
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_neg_constant0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
@@ -91,7 +91,7 @@ define <4 x float> @combine_vec_fcopysig
 
 define <4 x float> @combine_vec_fcopysign_neg_constant1(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_neg_constant1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm1 = [-0.000000e+00,-2.000000e+00,-4.000000e+00,-8.000000e+00]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
@@ -99,7 +99,7 @@ define <4 x float> @combine_vec_fcopysig
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_neg_constant1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
@@ -112,13 +112,13 @@ define <4 x float> @combine_vec_fcopysig
 
 define <4 x float> @combine_vec_fcopysign_fneg_fabs_sgn(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
@@ -133,14 +133,14 @@ define <4 x float> @combine_vec_fcopysig
 ; copysign(fabs(x), y) -> copysign(x, y)
 define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fabs_mag:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fabs_mag:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
@@ -155,14 +155,14 @@ define <4 x float> @combine_vec_fcopysig
 ; copysign(fneg(x), y) -> copysign(x, y)
 define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fneg_mag:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fneg_mag:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
@@ -177,14 +177,14 @@ define <4 x float> @combine_vec_fcopysig
 ; copysign(copysign(x,z), y) -> copysign(x, y)
 define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
 ; SSE-LABEL: combine_vec_fcopysign_fcopysign_mag:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fcopysign_mag:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
@@ -199,14 +199,14 @@ define <4 x float> @combine_vec_fcopysig
 ; copysign(x, copysign(y,z)) -> copysign(x, z)
 define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
 ; SSE-LABEL: combine_vec_fcopysign_fcopysign_sgn:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm2
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fcopysign_sgn:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
@@ -221,7 +221,7 @@ define <4 x float> @combine_vec_fcopysig
 ; copysign(x, fp_extend(y)) -> copysign(x, y)
 define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fpext_sgn:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm2, %xmm3
 ; SSE-NEXT:    cvtss2sd %xmm2, %xmm4
 ; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
@@ -257,7 +257,7 @@ define <4 x double> @combine_vec_fcopysi
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm2
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vcvtps2pd %xmm1, %ymm1
@@ -273,7 +273,7 @@ define <4 x double> @combine_vec_fcopysi
 ; copysign(x, fp_round(y)) -> copysign(x, y)
 define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x double> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm3
 ; SSE-NEXT:    movaps {{.*#+}} xmm5
 ; SSE-NEXT:    andps %xmm5, %xmm0
@@ -307,7 +307,7 @@ define <4 x float> @combine_vec_fcopysig
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandpd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vcvtpd2ps %ymm1, %xmm1

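All of the fcopysign combines above rest on one bit-level identity, which is what the checked andps/andps/orps sequences implement: keep the magnitude bits of the first operand and only the sign bit of the second. Hand-expanded for <4 x float> (a sketch; the tests themselves go through @llvm.copysign, and the function name here is hypothetical):

define <4 x float> @copysign_by_hand(<4 x float> %x, <4 x float> %y) {
  %xb = bitcast <4 x float> %x to <4 x i32>
  %yb = bitcast <4 x float> %y to <4 x i32>
  ; 0x7FFFFFFF keeps the magnitude, 0x80000000 keeps the sign
  %mag = and <4 x i32> %xb, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %sgn = and <4 x i32> %yb, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  %bits = or <4 x i32> %mag, %sgn
  %r = bitcast <4 x i32> %bits to <4 x float>
  ret <4 x float> %r
}
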
Modified: llvm/trunk/test/CodeGen/X86/combine-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-mul.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-mul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-mul.ll Mon Dec  4 09:18:51 2017
@@ -5,11 +5,11 @@
 ; fold (mul undef, x) -> 0
 define <4 x i32> @combine_vec_mul_undef0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_undef0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_undef0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> undef, %x
   ret <4 x i32> %1
@@ -18,11 +18,11 @@ define <4 x i32> @combine_vec_mul_undef0
 ; fold (mul x, undef) -> 0
 define <4 x i32> @combine_vec_mul_undef1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_undef1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_undef1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, undef
   ret <4 x i32> %1
@@ -31,12 +31,12 @@ define <4 x i32> @combine_vec_mul_undef1
 ; fold (mul x, 0) -> 0
 define <4 x i32> @combine_vec_mul_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, zeroinitializer
@@ -46,11 +46,11 @@ define <4 x i32> @combine_vec_mul_zero(<
 ; fold (mul x, 1) -> x
 define <4 x i32> @combine_vec_mul_one(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_one:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_one:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %1
@@ -59,14 +59,14 @@ define <4 x i32> @combine_vec_mul_one(<4
 ; fold (mul x, -1) -> 0-x
 define <4 x i32> @combine_vec_mul_negone(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_negone:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm1, %xmm1
 ; SSE-NEXT:    psubd %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_negone:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
@@ -77,12 +77,12 @@ define <4 x i32> @combine_vec_mul_negone
 ; fold (mul x, (1 << c)) -> x << c
 define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_pow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_pow2a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -91,12 +91,12 @@ define <4 x i32> @combine_vec_mul_pow2a(
 
 define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_pow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_pow2b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 1, i32 2, i32 4, i32 16>
@@ -105,7 +105,7 @@ define <4 x i32> @combine_vec_mul_pow2b(
 
 define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_mul_pow2c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    psllq $1, %xmm2
 ; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -117,7 +117,7 @@ define <4 x i64> @combine_vec_mul_pow2c(
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_pow2c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i64> %x, <i64 1, i64 2, i64 4, i64 16>
@@ -127,7 +127,7 @@ define <4 x i64> @combine_vec_mul_pow2c(
 ; fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
 define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_negpow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm0, %xmm0
 ; SSE-NEXT:    pxor %xmm1, %xmm1
 ; SSE-NEXT:    psubd %xmm0, %xmm1
@@ -135,7 +135,7 @@ define <4 x i32> @combine_vec_mul_negpow
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_negpow2a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
@@ -146,12 +146,12 @@ define <4 x i32> @combine_vec_mul_negpow
 
 define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_negpow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_negpow2b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16>
@@ -160,7 +160,7 @@ define <4 x i32> @combine_vec_mul_negpow
 
 define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_mul_negpow2c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
 ; SSE-NEXT:    pmuludq %xmm2, %xmm3
@@ -184,7 +184,7 @@ define <4 x i64> @combine_vec_mul_negpow
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_negpow2c:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
 ; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm1
 ; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm2
@@ -202,12 +202,12 @@ define <4 x i64> @combine_vec_mul_negpow
 ; (mul (shl X, c1), c2) -> (mul X, c2 << c1)
 define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_shl_const:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_const:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
@@ -218,13 +218,13 @@ define <4 x i32> @combine_vec_mul_shl_co
 ; (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one use.
 define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_oneuse0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld %xmm1, %xmm0
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_oneuse0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -235,13 +235,13 @@ define <4 x i32> @combine_vec_mul_shl_on
 
 define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_oneuse1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld %xmm1, %xmm0
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_oneuse1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -252,14 +252,14 @@ define <4 x i32> @combine_vec_mul_shl_on
 
 define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_multiuse0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmulld %xmm0, %xmm1
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_multiuse0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -272,14 +272,14 @@ define <4 x i32> @combine_vec_mul_shl_mu
 
 define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_multiuse1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmulld %xmm0, %xmm1
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_multiuse1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -294,13 +294,13 @@ define <4 x i32> @combine_vec_mul_shl_mu
 
 define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_add:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_add:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq

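The power-of-two multiply folds above need no multiplier at all: a uniform multiply by 2 becomes a single paddd (x + x), and mixed powers become a variable shift (vpsllvd) on AVX, as the CHECK lines show. A one-function sketch of the uniform case (function name hypothetical):

define <4 x i32> @mul_by_two(<4 x i32> %x) {
  ; folds to x + x, i.e. a single paddd, rather than a pmulld
  %r = mul <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %r
}
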
Modified: llvm/trunk/test/CodeGen/X86/combine-multiplies.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-multiplies.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-multiplies.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-multiplies.ll Mon Dec  4 09:18:51 2017
@@ -33,7 +33,7 @@
 ; Function Attrs: nounwind
 define void @testCombineMultiplies([100 x i32]* nocapture %a, i32 %lll) nounwind {
 ; CHECK-LABEL: testCombineMultiplies:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -74,7 +74,7 @@ entry:
 ; Output looks something like this:
 ;
 ; testCombineMultiplies_splat:                              # @testCombineMultiplies_splat
-; # BB#0:                                 # %entry
+; # %bb.0:                                 # %entry
 ; 	movdqa	.LCPI1_0, %xmm1         # xmm1 = [11,11,11,11]
 ; 	paddd	%xmm0, %xmm1
 ; 	movdqa	.LCPI1_1, %xmm2         # xmm2 = [22,22,22,22]
@@ -104,7 +104,7 @@ entry:
 ; Function Attrs: nounwind
 define void @testCombineMultiplies_splat(<4 x i32> %v1) nounwind {
 ; CHECK-LABEL: testCombineMultiplies_splat:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [11,11,11,11]
 ; CHECK-NEXT:    paddd %xmm0, %xmm1
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm2 = [22,22,22,22]
@@ -138,7 +138,7 @@ entry:
 ; Function Attrs: nounwind
 define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind {
 ; CHECK-LABEL: testCombineMultiplies_non_splat:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [11,22,33,44]
 ; CHECK-NEXT:    paddd %xmm0, %xmm1
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm2 = [22,33,44,55]

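The constants in the splat test above hint at the combine being exercised: mul (add v, c1), c2 distributes to add (mul v, c2), c1*c2, so several multiplies that share the base value %v can share a single pmulld of it. A reduced sketch of the input pattern, assuming that reading (function name hypothetical):

define <4 x i32> @mul_of_add(<4 x i32> %v) {
  ; distributes to (v * 22) + 242, exposing the common multiply of %v
  %add = add <4 x i32> %v, <i32 11, i32 11, i32 11, i32 11>
  %mul = mul <4 x i32> %add, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %mul
}
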
Modified: llvm/trunk/test/CodeGen/X86/combine-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-or.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-or.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-or.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @or_self(i32 %x) {
 ; CHECK-LABEL: or_self:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %or = or i32 %x, %x
@@ -12,7 +12,7 @@ define i32 @or_self(i32 %x) {
 
 define <4 x i32> @or_self_vec(<4 x i32> %x) {
 ; CHECK-LABEL: or_self_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %or = or <4 x i32> %x, %x
   ret <4 x i32> %or
@@ -23,7 +23,7 @@ define <4 x i32> @or_self_vec(<4 x i32>
 
 define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
@@ -35,7 +35,7 @@ define <2 x i64> @test1(<2 x i64> %a, <2
 
 define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -47,7 +47,7 @@ define <4 x i32> @test2(<4 x i32> %a, <4
 
 define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
@@ -59,7 +59,7 @@ define <2 x i64> @test3(<2 x i64> %a, <2
 
 define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
@@ -71,7 +71,7 @@ define <4 x i32> @test4(<4 x i32> %a, <4
 
 define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
@@ -83,7 +83,7 @@ define <4 x i32> @test5(<4 x i32> %a, <4
 
 define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
@@ -95,7 +95,7 @@ define <4 x i32> @test6(<4 x i32> %a, <4
 
 define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; CHECK-NEXT:    retq
   %and1 = and <4 x i32> %a, <i32 -1, i32 -1, i32 0, i32 0>
@@ -107,7 +107,7 @@ define <4 x i32> @test7(<4 x i32> %a, <4
 
 define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; CHECK-NEXT:    retq
   %and1 = and <2 x i64> %a, <i64 -1, i64 0>
@@ -119,7 +119,7 @@ define <2 x i64> @test8(<2 x i64> %a, <2
 
 define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %and1 = and <4 x i32> %a, <i32 0, i32 0, i32 -1, i32 -1>
@@ -131,7 +131,7 @@ define <4 x i32> @test9(<4 x i32> %a, <4
 
 define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %and1 = and <2 x i64> %a, <i64 0, i64 -1>
@@ -143,7 +143,7 @@ define <2 x i64> @test10(<2 x i64> %a, <
 
 define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
   %and1 = and <4 x i32> %a, <i32 -1, i32 0, i32 0, i32 0>
@@ -155,7 +155,7 @@ define <4 x i32> @test11(<4 x i32> %a, <
 
 define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
   %and1 = and <4 x i32> %a, <i32 0, i32 -1, i32 -1, i32 -1>
@@ -169,7 +169,7 @@ define <4 x i32> @test12(<4 x i32> %a, <
 
 define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test13:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[2,3]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
@@ -181,7 +181,7 @@ define <4 x i32> @test13(<4 x i32> %a, <
 
 define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test14:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
@@ -193,7 +193,7 @@ define <2 x i64> @test14(<2 x i64> %a, <
 
 define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test15:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,1],xmm0[2,1]
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -206,7 +206,7 @@ define <4 x i32> @test15(<4 x i32> %a, <
 
 define <2 x i64> @test16(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -222,7 +222,7 @@ define <2 x i64> @test16(<2 x i64> %a, <
 
 define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test17:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    psllq $32, %xmm0
 ; CHECK-NEXT:    movq {{.*#+}} xmm1 = xmm1[0],zero
 ; CHECK-NEXT:    por %xmm1, %xmm0
@@ -236,7 +236,7 @@ define <4 x i32> @test17(<4 x i32> %a, <
 
 define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test18:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pxor %xmm2, %xmm2
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
@@ -252,7 +252,7 @@ define <4 x i32> @test18(<4 x i32> %a, <
 
 define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test19:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,2,3]
 ; CHECK-NEXT:    pxor %xmm3, %xmm3
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
@@ -269,7 +269,7 @@ define <4 x i32> @test19(<4 x i32> %a, <
 
 define <2 x i64> @test20(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test20:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    retq
@@ -282,7 +282,7 @@ define <2 x i64> @test20(<2 x i64> %a, <
 
 define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test21:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT:    retq
@@ -298,7 +298,7 @@ define <2 x i64> @test21(<2 x i64> %a, <
 
 define <2 x double> @test22(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test22:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <2 x double> %a0 to <2 x i64>
@@ -313,7 +313,7 @@ define <2 x double> @test22(<2 x double>
 
 define <4 x float> @test23(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test23:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <4 x float> %a0 to <4 x i32>
@@ -328,7 +328,7 @@ define <4 x float> @test23(<4 x float> %
 
 define <4 x float> @test24(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test24:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <4 x float> %a0 to <2 x i64>
@@ -343,7 +343,7 @@ define <4 x float> @test24(<4 x float> %
 
 define <4 x float> @test25(<4 x float> %a0) {
 ; CHECK-LABEL: test25:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    blendps {{.*#+}} xmm0 = mem[0],xmm0[1,2],mem[3]
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <4 x float> %a0 to <4 x i32>
@@ -361,7 +361,7 @@ define <4 x float> @test25(<4 x float> %
 ; handle legal vector value types.
 define <4 x i8> @test_crash(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-LABEL: test_crash:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -374,7 +374,7 @@ define <4 x i8> @test_crash(<4 x i8> %a,
 
 define <4 x i32> @test2b(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2b:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32><i32 0, i32 0, i32 6, i32 7>
@@ -385,7 +385,7 @@ define <4 x i32> @test2b(<4 x i32> %a, <
 
 define <4 x i32> @test2c(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2c:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32><i32 0, i32 0, i32 6, i32 7>
@@ -397,7 +397,7 @@ define <4 x i32> @test2c(<4 x i32> %a, <
 
 define <4 x i32> @test2d(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2d:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -410,7 +410,7 @@ define <4 x i32> @test2d(<4 x i32> %a, <
 
 define <4 x i32> @test2e(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2e:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>, <4 x i32><i32 undef, i32 4, i32 2, i32 3>
@@ -421,7 +421,7 @@ define <4 x i32> @test2e(<4 x i32> %a, <
 
 define <4 x i32> @test2f(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT:    retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -435,7 +435,7 @@ define <4 x i32> @test2f(<4 x i32> %a, <
 
 define <2 x i64> @or_and_v2i64(<2 x i64> %a0) {
 ; CHECK-LABEL: or_and_v2i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    orps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
@@ -448,7 +448,7 @@ define <2 x i64> @or_and_v2i64(<2 x i64>
 
 define <4 x i32> @or_and_v4i32(<4 x i32> %a0) {
 ; CHECK-LABEL: or_and_v4i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
 ; CHECK-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
@@ -460,7 +460,7 @@ define <4 x i32> @or_and_v4i32(<4 x i32>
 
 define <2 x i64> @or_zext_v2i32(<2 x i32> %a0) {
 ; CHECK-LABEL: or_zext_v2i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [4294967295,4294967295]
 ; CHECK-NEXT:    retq
   %1 = zext <2 x i32> %a0 to <2 x i64>
@@ -470,7 +470,7 @@ define <2 x i64> @or_zext_v2i32(<2 x i32
 
 define <4 x i32> @or_zext_v4i16(<4 x i16> %a0) {
 ; CHECK-LABEL: or_zext_v4i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65535]
 ; CHECK-NEXT:    retq
   %1 = zext <4 x i16> %a0 to <4 x i32>

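Most of the or tests above are lane selects in disguise: when the two or operands carry disjoint constant masks (or were shuffled with zero on complementary lanes), the or takes whole elements from one side or the other and can be emitted as a single pblendw. test7's pattern, restated standalone (function name hypothetical):

define <4 x i32> @or_disjoint_masks(<4 x i32> %a, <4 x i32> %b) {
  ; lanes 0-1 come from %a, lanes 2-3 from %b: a blend, not an or
  %ma = and <4 x i32> %a, <i32 -1, i32 -1, i32 0, i32 0>
  %mb = and <4 x i32> %b, <i32 0, i32 0, i32 -1, i32 -1>
  %r = or <4 x i32> %ma, %mb
  ret <4 x i32> %r
}
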
Modified: llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; TODO - shuffle+sext are superfluous
 define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: combine_shuffle_sext_pmuldq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE-NEXT:    pmovsxdq %xmm0, %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -14,7 +14,7 @@ define <2 x i64> @combine_shuffle_sext_p
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_shuffle_sext_pmuldq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -32,7 +32,7 @@ define <2 x i64> @combine_shuffle_sext_p
 ; TODO - shuffle+zext are superfluous
 define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: combine_shuffle_zext_pmuludq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -41,7 +41,7 @@ define <2 x i64> @combine_shuffle_zext_p
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_shuffle_zext_pmuludq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -59,7 +59,7 @@ define <2 x i64> @combine_shuffle_zext_p
 ; TODO - blends are superfluous
 define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: combine_shuffle_zero_pmuludq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm2
 ; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
@@ -67,7 +67,7 @@ define <2 x i64> @combine_shuffle_zero_p
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_shuffle_zero_pmuludq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -84,7 +84,7 @@ define <2 x i64> @combine_shuffle_zero_p
 ; TODO - blends are superfluous
 define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm4, %xmm4
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
@@ -95,7 +95,7 @@ define <4 x i64> @combine_shuffle_zero_p
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_shuffle_zero_pmuludq_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
 ; AVX-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]

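The TODO notes in the pmuludq tests all point at the same observation: pmuludq (and pmuldq) only reads the even 32-bit lane of each 64-bit element, so shuffles, extends, or zeroing blends that merely normalize the odd lanes are dead weight. A sketch of the zero-blend shape, assuming this reading of the tests (function name hypothetical):

define <2 x i64> @pmuludq_shape(<4 x i32> %a0, <4 x i32> %a1) {
  ; zero the odd lanes, then multiply as i64: exactly what pmuludq computes,
  ; so the two zeroing blends are the superfluous part
  %m0 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %m1 = shufflevector <4 x i32> %a1, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %b0 = bitcast <4 x i32> %m0 to <2 x i64>
  %b1 = bitcast <4 x i32> %m1 to <2 x i64>
  %r = mul <2 x i64> %b0, %b1
  ret <2 x i64> %r
}
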
Modified: llvm/trunk/test/CodeGen/X86/combine-rotates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-rotates.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-rotates.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-rotates.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 ; fold (rot (rot x, c1), c2) -> rot x, c1+c2
 define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
 ; XOP-LABEL: combine_vec_rot_rot:
-; XOP:       # BB#0:
+; XOP:       # %bb.0:
 ; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: combine_vec_rot_rot:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vprolvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
@@ -24,12 +24,12 @@ define <4 x i32> @combine_vec_rot_rot(<4
 
 define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
 ; XOP-LABEL: combine_vec_rot_rot_splat:
-; XOP:       # BB#0:
+; XOP:       # %bb.0:
 ; XOP-NEXT:    vprotd $7, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: combine_vec_rot_rot_splat:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vprold $7, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
@@ -43,11 +43,11 @@ define <4 x i32> @combine_vec_rot_rot_sp
 
 define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
 ; XOP-LABEL: combine_vec_rot_rot_splat_zero:
-; XOP:       # BB#0:
+; XOP:       # %bb.0:
 ; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: combine_vec_rot_rot_splat_zero:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
   %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>

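Targets with native rotates recognize the shl/lshr/or idiom, and the combine above then sums the amounts of back-to-back rotates; per the CHECK lines, the splat test's composed rotates collapse to a single vprotd $7 (XOP) or vprold $7 (AVX-512). One rotate spelled out in IR (function name hypothetical):

define <4 x i32> @rotl_by_7(<4 x i32> %x) {
  ; (x << 7) | (x >> 25) is a rotate left by 7; a rotate-capable target
  ; matches the whole idiom to one instruction
  %hi = shl <4 x i32> %x, <i32 7, i32 7, i32 7, i32 7>
  %lo = lshr <4 x i32> %x, <i32 25, i32 25, i32 25, i32 25>
  %r = or <4 x i32> %hi, %lo
  ret <4 x i32> %r
}
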
Modified: llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sdiv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sdiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sdiv.ll Mon Dec  4 09:18:51 2017
@@ -6,11 +6,11 @@
 ; fold (sdiv undef, x) -> 0
 define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_undef0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_undef0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = sdiv <4 x i32> undef, %x
   ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_sdiv_undef
 ; fold (sdiv x, undef) -> undef
 define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_undef1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_undef1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = sdiv <4 x i32> %x, undef
   ret <4 x i32> %1
@@ -32,11 +32,11 @@ define <4 x i32> @combine_vec_sdiv_undef
 ; fold (sdiv x, 1) -> x
 define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_one:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_by_one:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %1
@@ -45,14 +45,14 @@ define <4 x i32> @combine_vec_sdiv_by_on
 ; fold (sdiv x, -1) -> 0 - x
 define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_negone:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm1, %xmm1
 ; SSE-NEXT:    psubd %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_by_negone:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
@@ -63,13 +63,13 @@ define <4 x i32> @combine_vec_sdiv_by_ne
 ; fold (sdiv x, y) -> (udiv x, y) iff x and y are positive
 define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_pos0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psrld $2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_by_pos0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -80,7 +80,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 
 define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_pos1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -94,7 +94,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_sdiv_by_pos1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrld $4, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrld $2, %xmm0, %xmm2
@@ -105,7 +105,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_sdiv_by_pos1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -117,7 +117,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; fold (sdiv x, (1 << c)) -> x >>u c
 define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_pow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    psrld $30, %xmm1
@@ -127,7 +127,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_by_pow2a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; AVX-NEXT:    vpsrld $30, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -139,7 +139,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 
 define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sdiv_by_pow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pextrd $1, %xmm0, %eax
 ; SSE-NEXT:    movl %eax, %ecx
 ; SSE-NEXT:    sarl $31, %ecx
@@ -164,7 +164,7 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_by_pow2b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrd $1, %xmm0, %eax
 ; AVX-NEXT:    movl %eax, %ecx
 ; AVX-NEXT:    sarl $31, %ecx

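The pow2a sequence above (psrad $31, psrld $30, paddd, psrad $2) is the classic signed divide-by-4: negative lanes get a bias of 3 added first so the arithmetic shift rounds toward zero rather than toward minus infinity. The same computation at the IR level (function name hypothetical):

define <4 x i32> @sdiv_by_4(<4 x i32> %x) {
  ; expected lowering: bias = (x >>s 31) >>u 30, i.e. 3 for negative
  ; lanes and 0 otherwise, then (x + bias) >>s 2
  %r = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %r
}
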
Modified: llvm/trunk/test/CodeGen/X86/combine-sext-in-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sext-in-reg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sext-in-reg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sext-in-reg.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; fold sextinreg(zext) -> sext
 define <4 x i64> @sextinreg_zext_v16i8_4i64(<16 x i8> %a0) {
 ; SSE-LABEL: sextinreg_zext_v16i8_4i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm2
 ; SSE-NEXT:    psrld $16, %xmm0
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm1
@@ -13,7 +13,7 @@ define <4 x i64> @sextinreg_zext_v16i8_4
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sextinreg_zext_v16i8_4i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovsxbq %xmm0, %ymm0
 ; AVX-NEXT:    retq
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -26,7 +26,7 @@ define <4 x i64> @sextinreg_zext_v16i8_4
 ; fold sextinreg(zext(sext)) -> sext
 define <4 x i64> @sextinreg_zext_sext_v16i8_4i64(<16 x i8> %a0) {
 ; SSE-LABEL: sextinreg_zext_sext_v16i8_4i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm2
 ; SSE-NEXT:    psrld $16, %xmm0
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm1
@@ -34,7 +34,7 @@ define <4 x i64> @sextinreg_zext_sext_v1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sextinreg_zext_sext_v16i8_4i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovsxbq %xmm0, %ymm0
 ; AVX-NEXT:    retq
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>

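Both sextinreg tests above start from the shufflevector shown in the hunks: take the low four bytes, zero-extend to i64, then sign-extend-in-register. Since only the original byte's bits survive, the whole chain is just a sign extension, hence the single vpmovsxbq on AVX. A sketch of the presumed input shape, with the shl/ashr-by-56 pair standing in for the sext_inreg (function name and exact spelling are assumptions):

define <4 x i64> @sextinreg_shape(<16 x i8> %a0) {
  %lo = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %z = zext <4 x i8> %lo to <4 x i64>
  ; shl+ashr by 56 re-signs bit 7 of each lane: sext_inreg from i8
  %s = shl <4 x i64> %z, <i64 56, i64 56, i64 56, i64 56>
  %r = ashr <4 x i64> %s, <i64 56, i64 56, i64 56, i64 56>
  ret <4 x i64> %r
}
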
Modified: llvm/trunk/test/CodeGen/X86/combine-shl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-shl.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-shl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-shl.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 ; fold (shl 0, x) -> 0
 define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> zeroinitializer, %x
@@ -20,11 +20,11 @@ define <4 x i32> @combine_vec_shl_zero(<
 ; fold (shl x, c >= size(x)) -> undef
 define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_outofrange0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_outofrange0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
   ret <4 x i32> %1
@@ -32,11 +32,11 @@ define <4 x i32> @combine_vec_shl_outofr
 
 define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_outofrange1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_outofrange1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
   ret <4 x i32> %1
@@ -44,11 +44,11 @@ define <4 x i32> @combine_vec_shl_outofr
 
 define <4 x i32> @combine_vec_shl_outofrange2(<4 x i32> %a0) {
 ; SSE-LABEL: combine_vec_shl_outofrange2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_outofrange2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
   %2 = shl <4 x i32> %1, <i32 33, i32 33, i32 33, i32 33>
@@ -58,11 +58,11 @@ define <4 x i32> @combine_vec_shl_outofr
 ; fold (shl x, 0) -> x
 define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_by_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_by_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, zeroinitializer
   ret <4 x i32> %1
@@ -71,12 +71,12 @@ define <4 x i32> @combine_vec_shl_by_zer
 ; if (shl x, c) is known to be zero, return 0
 define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_known_zero0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_known_zero0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
@@ -86,13 +86,13 @@ define <4 x i32> @combine_vec_shl_known_
 
 define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_known_zero1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_known_zero1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -104,7 +104,7 @@ define <4 x i32> @combine_vec_shl_known_
 ; fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
 define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_shl_trunc_and:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    pslld $23, %xmm1
@@ -114,7 +114,7 @@ define <4 x i32> @combine_vec_shl_trunc_
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_trunc_and:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
@@ -130,12 +130,12 @@ define <4 x i32> @combine_vec_shl_trunc_
 ; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
 define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_shl0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $6, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_shl0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $6, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -145,12 +145,12 @@ define <4 x i32> @combine_vec_shl_shl0(<
 
 define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_shl1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_shl1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -161,12 +161,12 @@ define <4 x i32> @combine_vec_shl_shl1(<
 ; fold (shl (shl x, c1), c2) -> 0
 define <4 x i32> @combine_vec_shl_shlr_zero0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_shlr_zero0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_shlr_zero0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -176,12 +176,12 @@ define <4 x i32> @combine_vec_shl_shlr_z
 
 define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_shl_zero1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_shl_zero1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
@@ -192,7 +192,7 @@ define <4 x i32> @combine_vec_shl_shl_ze
 ; fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
 define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
 ; SSE-LABEL: combine_vec_shl_ext_shl0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
 ; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -201,7 +201,7 @@ define <8 x i32> @combine_vec_shl_ext_sh
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX-NEXT:    vpslld $20, %ymm0, %ymm0
 ; AVX-NEXT:    retq
@@ -213,7 +213,7 @@ define <8 x i32> @combine_vec_shl_ext_sh
 
 define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
 ; SSE-LABEL: combine_vec_shl_ext_shl1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE-NEXT:    pmovsxwd %xmm1, %xmm1
@@ -223,7 +223,7 @@ define <8 x i32> @combine_vec_shl_ext_sh
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
@@ -237,7 +237,7 @@ define <8 x i32> @combine_vec_shl_ext_sh
 ; fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
 define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
 ; SSE-LABEL: combine_vec_shl_zext_lshr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -246,7 +246,7 @@ define <8 x i32> @combine_vec_shl_zext_l
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_zext_lshr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX-NEXT:    retq
@@ -258,7 +258,7 @@ define <8 x i32> @combine_vec_shl_zext_l
 
 define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
 ; SSE-LABEL: combine_vec_shl_zext_lshr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrlw $8, %xmm1
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,6],xmm1[7]
@@ -279,7 +279,7 @@ define <8 x i32> @combine_vec_shl_zext_l
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_zext_lshr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,2,3,4,5,6,7,8]
 ; AVX-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
@@ -297,12 +297,12 @@ define <8 x i32> @combine_vec_shl_zext_l
 ; fold (shl (sr[la] exact X,  C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
 define <4 x i32> @combine_vec_shl_ge_ashr_extact0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_ge_ashr_extact0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ge_ashr_extact0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr exact <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
@@ -312,7 +312,7 @@ define <4 x i32> @combine_vec_shl_ge_ash
 
 define <4 x i32> @combine_vec_shl_ge_ashr_extact1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_ge_ashr_extact1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $8, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -327,7 +327,7 @@ define <4 x i32> @combine_vec_shl_ge_ash
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ge_ashr_extact1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -339,12 +339,12 @@ define <4 x i32> @combine_vec_shl_ge_ash
 ; fold (shl (sr[la] exact X,  C1), C2) -> (sr[la] X, (C2-C1)) if C1  > C2
 define <4 x i32> @combine_vec_shl_lt_ashr_extact0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_lt_ashr_extact0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrad $2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_lt_ashr_extact0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrad $2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr exact <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
@@ -354,7 +354,7 @@ define <4 x i32> @combine_vec_shl_lt_ash
 
 define <4 x i32> @combine_vec_shl_lt_ashr_extact1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_lt_ashr_extact1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $8, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -369,7 +369,7 @@ define <4 x i32> @combine_vec_shl_lt_ash
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_lt_ashr_extact1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -381,13 +381,13 @@ define <4 x i32> @combine_vec_shl_lt_ash
 ; fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) if C2 > C1
 define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_gt_lshr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $2, %xmm0
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_gt_lshr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
 ; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -399,7 +399,7 @@ define <4 x i32> @combine_vec_shl_gt_lsh
 
 define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_gt_lshr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $8, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -414,7 +414,7 @@ define <4 x i32> @combine_vec_shl_gt_lsh
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_gt_lshr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -426,13 +426,13 @@ define <4 x i32> @combine_vec_shl_gt_lsh
 ; fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) if C1 >= C2
 define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_le_lshr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrld $2, %xmm0
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_le_lshr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
 ; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -444,7 +444,7 @@ define <4 x i32> @combine_vec_shl_le_lsh
 
 define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_le_lshr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $8, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -459,7 +459,7 @@ define <4 x i32> @combine_vec_shl_le_lsh
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_le_lshr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -471,12 +471,12 @@ define <4 x i32> @combine_vec_shl_le_lsh
 ; fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
 define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_ashr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ashr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -487,12 +487,12 @@ define <4 x i32> @combine_vec_shl_ashr0(
 
 define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_ashr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ashr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
@@ -503,13 +503,13 @@ define <4 x i32> @combine_vec_shl_ashr1(
 ; fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
 define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_add0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $2, %xmm0
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_add0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -521,13 +521,13 @@ define <4 x i32> @combine_vec_shl_add0(<
 
 define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_add1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_add1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -539,13 +539,13 @@ define <4 x i32> @combine_vec_shl_add1(<
 ; fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
 define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_or0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $2, %xmm0
 ; SSE-NEXT:    por {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_or0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
 ; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
@@ -557,13 +557,13 @@ define <4 x i32> @combine_vec_shl_or0(<4
 
 define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_or1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    por {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_or1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -575,12 +575,12 @@ define <4 x i32> @combine_vec_shl_or1(<4
 ; fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
 define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_mul0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_mul0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -591,12 +591,12 @@ define <4 x i32> @combine_vec_shl_mul0(<
 
 define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_mul1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_mul1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
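
A minimal standalone sketch of the shl+shl merge these combine-shl.ll checks pin down; the function name is hypothetical, not part of the patch. The two constant shifts combine into one, so SSE lowering should emit a single pslld $5:

define <4 x i32> @example_shl_shl_merge(<4 x i32> %x) {
  ; fold (shl (shl x, 2), 3) -> (shl x, 5)
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}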

Modified: llvm/trunk/test/CodeGen/X86/combine-sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sra.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sra.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sra.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 ; fold (sra 0, x) -> 0
 define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> zeroinitializer, %x
@@ -20,12 +20,12 @@ define <4 x i32> @combine_vec_ashr_zero(
 ; fold (sra -1, x) -> -1
 define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_allones:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_allones:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
@@ -35,11 +35,11 @@ define <4 x i32> @combine_vec_ashr_allon
 ; fold (sra x, c >= size(x)) -> undef
 define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_outofrange0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_outofrange0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
   ret <4 x i32> %1
@@ -47,11 +47,11 @@ define <4 x i32> @combine_vec_ashr_outof
 
 define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_outofrange1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_outofrange1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
   ret <4 x i32> %1
@@ -60,11 +60,11 @@ define <4 x i32> @combine_vec_ashr_outof
 ; fold (sra x, 0) -> x
 define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_by_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_by_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, zeroinitializer
   ret <4 x i32> %1
@@ -73,12 +73,12 @@ define <4 x i32> @combine_vec_ashr_by_ze
 ; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
 define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_ashr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrad $6, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrad $6, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -88,7 +88,7 @@ define <4 x i32> @combine_vec_ashr_ashr0
 
 define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_ashr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $10, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -102,7 +102,7 @@ define <4 x i32> @combine_vec_ashr_ashr1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -112,12 +112,12 @@ define <4 x i32> @combine_vec_ashr_ashr1
 
 define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_ashr2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
@@ -127,7 +127,7 @@ define <4 x i32> @combine_vec_ashr_ashr2
 
 define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_ashr_ashr3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $27, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -146,7 +146,7 @@ define <4 x i32> @combine_vec_ashr_ashr3
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -158,7 +158,7 @@ define <4 x i32> @combine_vec_ashr_ashr3
 ; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
 define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_ashr_trunc_and:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm2
@@ -181,7 +181,7 @@ define <4 x i32> @combine_vec_ashr_trunc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_trunc_and:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
@@ -198,7 +198,7 @@ define <4 x i32> @combine_vec_ashr_trunc
 ;      if c1 is equal to the number of bits the trunc removes
 define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_ashr_trunc_lshr:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlq $32, %xmm1
 ; SSE-NEXT:    psrlq $32, %xmm0
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -214,7 +214,7 @@ define <4 x i32> @combine_vec_ashr_trunc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_trunc_lshr:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -231,7 +231,7 @@ define <4 x i32> @combine_vec_ashr_trunc
 ;      if c1 is equal to the number of bits the trunc removes
 define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_ashr_trunc_ashr:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
@@ -248,7 +248,7 @@ define <4 x i32> @combine_vec_ashr_trunc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_trunc_ashr:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
@@ -263,7 +263,7 @@ define <4 x i32> @combine_vec_ashr_trunc
 ; If the sign bit is known to be zero, switch this to a SRL.
 define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_ashr_positive:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -285,7 +285,7 @@ define <4 x i32> @combine_vec_ashr_posit
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_positive:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -296,12 +296,12 @@ define <4 x i32> @combine_vec_ashr_posit
 
 define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_ashr_positive_splat:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_positive_splat:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = and <4 x i32> %x, <i32 1023, i32 1023, i32 1023, i32 1023>
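
The same shape of sketch for the sra+sra merge behind combine_vec_ashr_ashr0 above (hypothetical function name, not from the patch); the shift amounts add, so a single psrad $6 should be emitted:

define <4 x i32> @example_ashr_ashr_merge(<4 x i32> %x) {
  ; fold (sra (sra x, 2), 4) -> (sra x, 6)
  %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}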

Modified: llvm/trunk/test/CodeGen/X86/combine-srem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-srem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-srem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-srem.ll Mon Dec  4 09:18:51 2017
@@ -6,11 +6,11 @@
 ; fold (srem undef, x) -> 0
 define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_undef0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_srem_undef0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = srem <4 x i32> undef, %x
   ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_srem_undef
 ; fold (srem x, undef) -> undef
 define <4 x i32> @combine_vec_srem_undef1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_undef1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_srem_undef1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = srem <4 x i32> %x, undef
   ret <4 x i32> %1
@@ -32,17 +32,17 @@ define <4 x i32> @combine_vec_srem_undef
 ; fold (srem x, y) -> (urem x, y) iff x and y are positive
 define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_by_pos0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_srem_by_pos0:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_srem_by_pos0:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -53,12 +53,12 @@ define <4 x i32> @combine_vec_srem_by_po
 
 define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_by_pos1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_srem_by_pos1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
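
A sketch of the srem-to-urem fold these combine-srem.ll tests exercise, with a hypothetical function name. Because the and makes the value provably non-negative, srem by a power of two should degrade to urem and then to a plain mask, leaving a single andps:

define <4 x i32> @example_srem_known_positive(<4 x i32> %x) {
  ; %1 is non-negative, so (srem %1, 16) acts like (urem %1, 16),
  ; which reduces to (and %1, 15); the two masks then merge.
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = srem <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %2
}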

Modified: llvm/trunk/test/CodeGen/X86/combine-srl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-srl.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-srl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-srl.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 ; fold (srl 0, x) -> 0
 define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> zeroinitializer, %x
@@ -20,11 +20,11 @@ define <4 x i32> @combine_vec_lshr_zero(
 ; fold (srl x, c >= size(x)) -> undef
 define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_outofrange0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_outofrange0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
   ret <4 x i32> %1
@@ -32,11 +32,11 @@ define <4 x i32> @combine_vec_lshr_outof
 
 define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_outofrange1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_outofrange1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
   ret <4 x i32> %1
@@ -45,11 +45,11 @@ define <4 x i32> @combine_vec_lshr_outof
 ; fold (srl x, 0) -> x
 define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_by_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_by_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, zeroinitializer
   ret <4 x i32> %1
@@ -58,12 +58,12 @@ define <4 x i32> @combine_vec_lshr_by_ze
 ; if (srl x, c) is known to be zero, return 0
 define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_known_zero0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_known_zero0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
@@ -73,7 +73,7 @@ define <4 x i32> @combine_vec_lshr_known
 
 define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_known_zero1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $11, %xmm1
@@ -88,7 +88,7 @@ define <4 x i32> @combine_vec_lshr_known
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_known_zero1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
@@ -101,12 +101,12 @@ define <4 x i32> @combine_vec_lshr_known
 ; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
 define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lshr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrld $6, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lshr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $6, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -116,7 +116,7 @@ define <4 x i32> @combine_vec_lshr_lshr0
 
 define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lshr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $10, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
@@ -130,7 +130,7 @@ define <4 x i32> @combine_vec_lshr_lshr1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lshr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -141,12 +141,12 @@ define <4 x i32> @combine_vec_lshr_lshr1
 ; fold (srl (srl x, c1), c2) -> 0
 define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lshr_zero0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lshr_zero0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -156,12 +156,12 @@ define <4 x i32> @combine_vec_lshr_lshr_
 
 define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lshr_zero1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lshr_zero1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
@@ -172,14 +172,14 @@ define <4 x i32> @combine_vec_lshr_lshr_
 ; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
 define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrlq $48, %xmm1
 ; SSE-NEXT:    psrlq $48, %xmm0
 ; SSE-NEXT:    packusdw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlq $48, %ymm0, %ymm0
 ; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
@@ -193,7 +193,7 @@ define <4 x i32> @combine_vec_lshr_trunc
 
 define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrlq $35, %xmm2
 ; SSE-NEXT:    psrlq $34, %xmm1
@@ -216,7 +216,7 @@ define <4 x i32> @combine_vec_lshr_trunc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_lshr1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -232,12 +232,12 @@ define <4 x i32> @combine_vec_lshr_trunc
 ; fold (srl (trunc (srl x, c1)), c2) -> 0
 define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
@@ -248,7 +248,7 @@ define <4 x i32> @combine_vec_lshr_trunc
 
 define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
 ; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrlq $51, %xmm2
 ; SSE-NEXT:    psrlq $50, %xmm1
@@ -271,7 +271,7 @@ define <4 x i32> @combine_vec_lshr_trunc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
@@ -287,12 +287,12 @@ define <4 x i32> @combine_vec_lshr_trunc
 ; fold (srl (shl x, c), c) -> (and x, cst2)
 define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_shl_mask0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_shl_mask0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -303,12 +303,12 @@ define <4 x i32> @combine_vec_lshr_shl_m
 
 define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_shl_mask1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_shl_mask1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 =  shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
@@ -319,12 +319,12 @@ define <4 x i32> @combine_vec_lshr_shl_m
 ; fold (srl (sra X, Y), 31) -> (srl X, 31)
 define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_lshr_ashr_sign:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrld $31, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_ashr_sign:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, %y
@@ -335,14 +335,14 @@ define <4 x i32> @combine_vec_lshr_ashr_
 ; fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
 define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    psrld $4, %xmm0
 ; SSE-NEXT:    pxor {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrld $4, %xmm0, %xmm0
@@ -357,7 +357,7 @@ define <4 x i32> @combine_vec_lshr_lzcnt
 
 define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -389,7 +389,7 @@ define <4 x i32> @combine_vec_lshr_lzcnt
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm2
@@ -424,7 +424,7 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x
 ; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
 define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_lshr_trunc_and:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm2
@@ -447,7 +447,7 @@ define <4 x i32> @combine_vec_lshr_trunc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_and:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
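
The unsigned counterpart of the shift merge, matching combine_vec_lshr_lshr0 above (hypothetical function name); the two logical right shifts should fold into a single psrld $6:

define <4 x i32> @example_lshr_lshr_merge(<4 x i32> %x) {
  ; fold (srl (srl x, 2), 4) -> (srl x, 6)
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}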

Modified: llvm/trunk/test/CodeGen/X86/combine-sse41-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sse41-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sse41-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sse41-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_sse41_blend_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 0)
   ret <2 x double> %1
@@ -12,7 +12,7 @@ define <2 x double> @test_x86_sse41_blen
 
 define <4 x float> @test_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_sse41_blend_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 0)
   ret <4 x float> %1
@@ -20,7 +20,7 @@ define <4 x float> @test_x86_sse41_blend
 
 define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: test_x86_sse41_pblend_w:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 0)
   ret <8 x i16> %1
@@ -28,7 +28,7 @@ define <8 x i16> @test_x86_sse41_pblend_
 
 define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test2_x86_sse41_blend_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 -1)
@@ -37,7 +37,7 @@ define <2 x double> @test2_x86_sse41_ble
 
 define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test2_x86_sse41_blend_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 -1)
@@ -46,7 +46,7 @@ define <4 x float> @test2_x86_sse41_blen
 
 define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: test2_x86_sse41_pblend_w:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
@@ -55,7 +55,7 @@ define <8 x i16> @test2_x86_sse41_pblend
 
 define <2 x double> @test3_x86_sse41_blend_pd(<2 x double> %a0) {
 ; CHECK-LABEL: test3_x86_sse41_blend_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a0, i32 7)
   ret <2 x double> %1
@@ -63,7 +63,7 @@ define <2 x double> @test3_x86_sse41_ble
 
 define <4 x float> @test3_x86_sse41_blend_ps(<4 x float> %a0) {
 ; CHECK-LABEL: test3_x86_sse41_blend_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a0, i32 7)
   ret <4 x float> %1
@@ -71,7 +71,7 @@ define <4 x float> @test3_x86_sse41_blen
 
 define <8 x i16> @test3_x86_sse41_pblend_w(<8 x i16> %a0) {
 ; CHECK-LABEL: test3_x86_sse41_pblend_w:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a0, i32 7)
   ret <8 x i16> %1
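
Restating the constant-mask blend simplification checked above as a self-contained sketch (hypothetical function name, same intrinsic signature as the tests): an all-ones immediate takes every lane from the second source, so the blend collapses to a single register move, while a zero immediate folds away entirely:

declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i32)

define <4 x float> @example_blendps_allones(<4 x float> %a, <4 x float> %b) {
  ; mask -1 selects %b in every lane, so only a movaps survives
  %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a, <4 x float> %b, i32 -1)
  ret <4 x float> %1
}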

Modified: llvm/trunk/test/CodeGen/X86/combine-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sub.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sub.ll Mon Dec  4 09:18:51 2017
@@ -5,11 +5,11 @@
 ; fold (sub x, 0) -> x
 define <4 x i32> @combine_vec_sub_zero(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_sub_zero:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_zero:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %a, zeroinitializer
   ret <4 x i32> %1
@@ -18,12 +18,12 @@ define <4 x i32> @combine_vec_sub_zero(<
 ; fold (sub x, x) -> 0
 define <4 x i32> @combine_vec_sub_self(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_sub_self:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_self:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %a, %a
@@ -33,12 +33,12 @@ define <4 x i32> @combine_vec_sub_self(<
 ; fold (sub x, c) -> (add x, -c)
 define <4 x i32> @combine_vec_sub_constant(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sub_constant:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_constant:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
@@ -48,13 +48,13 @@ define <4 x i32> @combine_vec_sub_consta
 ; Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
 define <4 x i32> @combine_vec_sub_negone(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_sub_negone:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; SSE-NEXT:    pxor %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_negone:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -65,12 +65,12 @@ define <4 x i32> @combine_vec_sub_negone
 ; fold A-(A-B) -> B
 define <4 x i32> @combine_vec_sub_sub(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_sub_sub:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_sub:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %a, %b
@@ -81,12 +81,12 @@ define <4 x i32> @combine_vec_sub_sub(<4
 ; fold (A+B)-A -> B
 define <4 x i32> @combine_vec_sub_add0(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_sub_add0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_add0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %a, %b
@@ -97,11 +97,11 @@ define <4 x i32> @combine_vec_sub_add0(<
 ; fold (A+B)-B -> A
 define <4 x i32> @combine_vec_sub_add1(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: combine_vec_sub_add1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_add1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %a, %b
   %2 = sub <4 x i32> %1, %b
@@ -111,14 +111,14 @@ define <4 x i32> @combine_vec_sub_add1(<
 ; fold C2-(A+C1) -> (C2-C1)-A
 define <4 x i32> @combine_vec_sub_constant_add(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_sub_constant_add:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [3,1,4294967295,4294967293]
 ; SSE-NEXT:    psubd %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_constant_add:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [3,1,4294967295,4294967293]
 ; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
@@ -130,12 +130,12 @@ define <4 x i32> @combine_vec_sub_consta
 ; fold ((A+(B+C))-B) -> A+C
 define <4 x i32> @combine_vec_sub_add_add(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_sub_add_add:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_add_add:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %b, %c
@@ -147,12 +147,12 @@ define <4 x i32> @combine_vec_sub_add_ad
 ; fold ((A+(B-C))-B) -> A-C
 define <4 x i32> @combine_vec_sub_add_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_sub_add_sub:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_add_sub:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %b, %c
@@ -164,12 +164,12 @@ define <4 x i32> @combine_vec_sub_add_su
 ; fold ((A-(B-C))-C) -> A-B
 define <4 x i32> @combine_vec_sub_sub_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; SSE-LABEL: combine_vec_sub_sub_sub:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_sub_sub:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %b, %c
@@ -181,11 +181,11 @@ define <4 x i32> @combine_vec_sub_sub_su
 ; fold undef-A -> undef
 define <4 x i32> @combine_vec_sub_undef0(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_sub_undef0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_undef0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> undef, %a
   ret <4 x i32> %1
@@ -194,11 +194,11 @@ define <4 x i32> @combine_vec_sub_undef0
 ; fold A-undef -> undef
 define <4 x i32> @combine_vec_sub_undef1(<4 x i32> %a) {
 ; SSE-LABEL: combine_vec_sub_undef1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_undef1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = sub <4 x i32> %a, undef
   ret <4 x i32> %1
@@ -207,14 +207,14 @@ define <4 x i32> @combine_vec_sub_undef1
 ; sub X, (sext Y i1) -> add X, (and Y 1)
 define <4 x i32> @combine_vec_add_sext(<4 x i32> %x, <4 x i1> %y) {
 ; SSE-LABEL: combine_vec_add_sext:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm1
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_add_sext:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; AVX-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
@@ -227,14 +227,14 @@ define <4 x i32> @combine_vec_add_sext(<
 ; sub X, (sextinreg Y i1) -> add X, (and Y 1)
 define <4 x i32> @combine_vec_sub_sextinreg(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_sub_sextinreg:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $31, %xmm1
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sub_sextinreg:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; AVX-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
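
A reduced form of the A-(A-B) -> B fold from combine_vec_sub_sub above (hypothetical function name): the algebra cancels completely, so no arithmetic should remain, only a move of %b into the return register:

define <4 x i32> @example_sub_of_sub(<4 x i32> %a, <4 x i32> %b) {
  ; fold a - (a - b) -> b
  %1 = sub <4 x i32> %a, %b
  %2 = sub <4 x i32> %a, %1
  ret <4 x i32> %2
}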

Modified: llvm/trunk/test/CodeGen/X86/combine-testm-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-testm-and.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-testm-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-testm-and.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @combineTESTM_AND_1(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: combineTESTM_AND_1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vptestmq %zmm0, %zmm1, %k0
 ; CHECK-NEXT:    kmovb %k0, %eax
 ; CHECK-NEXT:    vzeroupper
@@ -16,7 +16,7 @@ define i32 @combineTESTM_AND_1(<8 x i64>
 
 define i32 @combineTESTM_AND_2(<8 x i64> %a, <8 x i64> %b , i8 %mask) {
 ; CHECK-LABEL: combineTESTM_AND_2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vptestmq %zmm0, %zmm1, %k0 {%k1}
 ; CHECK-NEXT:    kmovb %k0, %eax
@@ -30,7 +30,7 @@ define i32 @combineTESTM_AND_2(<8 x i64>
 
 define i32 @combineTESTM_AND_mask_3(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
 ; CHECK-LABEL: combineTESTM_AND_mask_3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vptestmq (%rdi), %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovb %k0, %eax
@@ -45,7 +45,7 @@ define i32 @combineTESTM_AND_mask_3(<8 x
 
 define i32 @combineTESTM_AND_mask_4(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
 ; CHECK-LABEL: combineTESTM_AND_mask_4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vptestmq (%rdi), %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovb %k0, %eax
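
The IR bodies of these combine-testm-and.ll tests go through the ptestm intrinsics and are not shown in this hunk; a rough pure-IR equivalent of the pattern, under the assumption that generic i1-vector lowering selects vptestmq on AVX-512, might look like the following (hypothetical function name):

define i8 @example_testm_and(<8 x i64> %a, <8 x i64> %b) {
  ; vptestmq computes (a & b) != 0 per lane directly, so the explicit
  ; and should be absorbed into the test instruction rather than emitted
  %and = and <8 x i64> %a, %b
  %cmp = icmp ne <8 x i64> %and, zeroinitializer
  %res = bitcast <8 x i1> %cmp to i8
  ret i8 %res
}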

Modified: llvm/trunk/test/CodeGen/X86/combine-udiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-udiv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-udiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-udiv.ll Mon Dec  4 09:18:51 2017
@@ -6,11 +6,11 @@
 ; fold (udiv undef, x) -> 0
 define <4 x i32> @combine_vec_udiv_undef0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_udiv_undef0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_udiv_undef0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = udiv <4 x i32> undef, %x
   ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_udiv_undef
 ; fold (udiv x, undef) -> undef
 define <4 x i32> @combine_vec_udiv_undef1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_udiv_undef1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_udiv_undef1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = udiv <4 x i32> %x, undef
   ret <4 x i32> %1
@@ -32,12 +32,12 @@ define <4 x i32> @combine_vec_udiv_undef
 ; fold (udiv x, (1 << c)) -> x >>u c
 define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_udiv_by_pow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psrld $2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_udiv_by_pow2a:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = udiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
@@ -46,7 +46,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 
 define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_udiv_by_pow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $3, %xmm1
@@ -59,7 +59,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_pow2b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrld $4, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrld $2, %xmm0, %xmm2
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
@@ -69,7 +69,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_udiv_by_pow2b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %1 = udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -78,7 +78,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 
 define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_udiv_by_pow2c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
@@ -99,7 +99,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_pow2c:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrld %xmm2, %xmm0, %xmm2
 ; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm3
@@ -115,7 +115,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_udiv_by_pow2c:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %1 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
@@ -126,7 +126,7 @@ define <4 x i32> @combine_vec_udiv_by_po
 ; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
 define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_udiv_by_shl_pow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -148,7 +148,7 @@ define <4 x i32> @combine_vec_udiv_by_sh
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_shl_pow2a:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrld %xmm2, %xmm0, %xmm2
@@ -165,7 +165,7 @@ define <4 x i32> @combine_vec_udiv_by_sh
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_udiv_by_shl_pow2a:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
 ; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
@@ -177,7 +177,7 @@ define <4 x i32> @combine_vec_udiv_by_sh
 
 define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_udiv_by_shl_pow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -199,7 +199,7 @@ define <4 x i32> @combine_vec_udiv_by_sh
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vpsrld %xmm2, %xmm0, %xmm2
@@ -216,7 +216,7 @@ define <4 x i32> @combine_vec_udiv_by_sh
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_udiv_by_shl_pow2b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
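
A sketch of the power-of-two udiv strength reduction from combine_vec_udiv_by_pow2a above (hypothetical function name): the division should become a single logical shift, here psrld $3:

define <4 x i32> @example_udiv_pow2(<4 x i32> %x) {
  ; fold (udiv x, 8) -> (srl x, 3)
  %1 = udiv <4 x i32> %x, <i32 8, i32 8, i32 8, i32 8>
  ret <4 x i32> %1
}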

Modified: llvm/trunk/test/CodeGen/X86/combine-urem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-urem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-urem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-urem.ll Mon Dec  4 09:18:51 2017
@@ -6,11 +6,11 @@
 ; fold (urem undef, x) -> 0
 define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_undef0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_urem_undef0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = urem <4 x i32> undef, %x
   ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_urem_undef
 ; fold (urem x, undef) -> undef
 define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_undef1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_urem_undef1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %1 = urem <4 x i32> %x, undef
   ret <4 x i32> %1
@@ -32,17 +32,17 @@ define <4 x i32> @combine_vec_urem_undef
 ; fold (urem x, pow2) -> (and x, (pow2-1))
 define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_by_pow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_pow2a:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2a:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
 ; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -52,12 +52,12 @@ define <4 x i32> @combine_vec_urem_by_po
 
 define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_by_pow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_urem_by_pow2b:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -66,7 +66,7 @@ define <4 x i32> @combine_vec_urem_by_po
 
 define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_pow2c:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $23, %xmm1
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -76,7 +76,7 @@ define <4 x i32> @combine_vec_urem_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_pow2c:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
@@ -86,7 +86,7 @@ define <4 x i32> @combine_vec_urem_by_po
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2c:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
 ; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -100,7 +100,7 @@ define <4 x i32> @combine_vec_urem_by_po
 
 define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_pow2d:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
@@ -125,7 +125,7 @@ define <4 x i32> @combine_vec_urem_by_po
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_pow2d:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX1-NEXT:    vpsrld %xmm2, %xmm3, %xmm2
@@ -145,7 +145,7 @@ define <4 x i32> @combine_vec_urem_by_po
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2d:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -160,7 +160,7 @@ define <4 x i32> @combine_vec_urem_by_po
 ; fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
 define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_shl_pow2a:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $23, %xmm1
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -171,7 +171,7 @@ define <4 x i32> @combine_vec_urem_by_sh
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
@@ -182,7 +182,7 @@ define <4 x i32> @combine_vec_urem_by_sh
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
 ; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -196,7 +196,7 @@ define <4 x i32> @combine_vec_urem_by_sh
 
 define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_shl_pow2b:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pslld $23, %xmm1
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -207,7 +207,7 @@ define <4 x i32> @combine_vec_urem_by_sh
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
@@ -218,7 +218,7 @@ define <4 x i32> @combine_vec_urem_by_sh
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
 ; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2

Modified: llvm/trunk/test/CodeGen/X86/commute-3dnow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-3dnow.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-3dnow.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-3dnow.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @commute_m_pfadd(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfadd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -15,7 +15,7 @@ define void @commute_m_pfadd(x86_mmx *%a
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfadd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pfadd (%rsi), %mm0
 ; X64-NEXT:    pfadd (%rdx), %mm0
@@ -33,7 +33,7 @@ declare x86_mmx @llvm.x86.3dnow.pfadd(x8
 
 define void @commute_m_pfsub(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfsub:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -44,7 +44,7 @@ define void @commute_m_pfsub(x86_mmx *%a
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfsub:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pfsub (%rsi), %mm0
 ; X64-NEXT:    pfsubr (%rdx), %mm0
@@ -62,7 +62,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsub(x8
 
 define void @commute_m_pfsubr(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfsubr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -73,7 +73,7 @@ define void @commute_m_pfsubr(x86_mmx *%
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfsubr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pfsubr (%rsi), %mm0
 ; X64-NEXT:    pfsub (%rdx), %mm0
@@ -91,7 +91,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsubr(x
 
 define void @commute_m_pfmul(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfmul:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -102,7 +102,7 @@ define void @commute_m_pfmul(x86_mmx *%a
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfmul:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pfmul (%rsi), %mm0
 ; X64-NEXT:    pfmul (%rdx), %mm0
@@ -121,7 +121,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmul(x8
 ; PFMAX can't commute without fast-math.
 define void @commute_m_pfmax(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfmax:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -133,7 +133,7 @@ define void @commute_m_pfmax(x86_mmx *%a
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfmax:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movq (%rdx), %mm1
 ; X64-NEXT:    pfmax (%rsi), %mm0
@@ -153,7 +153,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmax(x8
 ; PFMIN can't commute without fast-math.
 define void @commute_m_pfmin(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfmin:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -165,7 +165,7 @@ define void @commute_m_pfmin(x86_mmx *%a
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfmin:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movq (%rdx), %mm1
 ; X64-NEXT:    pfmin (%rsi), %mm0
@@ -184,7 +184,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmin(x8
 
 define void @commute_m_pfcmpeq(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pfcmpeq:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -195,7 +195,7 @@ define void @commute_m_pfcmpeq(x86_mmx *
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pfcmpeq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pfcmpeq (%rsi), %mm0
 ; X64-NEXT:    pfcmpeq (%rdx), %mm0
@@ -213,7 +213,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpeq(
 
 define void @commute_m_pavgusb(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pavgusb:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -224,7 +224,7 @@ define void @commute_m_pavgusb(x86_mmx *
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pavgusb:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pavgusb (%rsi), %mm0
 ; X64-NEXT:    pavgusb (%rdx), %mm0
@@ -242,7 +242,7 @@ declare x86_mmx @llvm.x86.3dnow.pavgusb(
 
 define void @commute_m_pmulhrw(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
 ; X32-LABEL: commute_m_pmulhrw:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -253,7 +253,7 @@ define void @commute_m_pmulhrw(x86_mmx *
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_m_pmulhrw:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    pmulhrw (%rsi), %mm0
 ; X64-NEXT:    pmulhrw (%rdx), %mm0

Modified: llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vpblendw_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
 ; CHECK-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %b
@@ -14,7 +14,7 @@ declare <8 x i16> @llvm.x86.sse41.pblend
 
 define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vpblendw_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],mem[1,2,3],ymm0[4],mem[5,6,7],ymm0[8],mem[9,10,11],ymm0[12],mem[13,14,15]
 ; CHECK-NEXT:    retq
   %1 = load <16 x i16>, <16 x i16>* %b
@@ -25,7 +25,7 @@ declare <16 x i16> @llvm.x86.avx2.pblend
 
 define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vpblendd_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
 ; CHECK-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %b
@@ -36,7 +36,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd
 
 define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vpblendd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6],ymm0[7]
 ; CHECK-NEXT:    retq
   %1 = load <8 x i32>, <8 x i32>* %b
@@ -47,7 +47,7 @@ declare <8 x i32> @llvm.x86.avx2.pblendd
 
 define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vblendps_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3]
 ; CHECK-NEXT:    retq
   %1 = load <4 x float>, <4 x float>* %b
@@ -58,7 +58,7 @@ declare <4 x float> @llvm.x86.sse41.blen
 
 define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vblendps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3,4,5,6,7]
 ; CHECK-NEXT:    retq
   %1 = load <8 x float>, <8 x float>* %b
@@ -69,7 +69,7 @@ declare <8 x float> @llvm.x86.avx.blend.
 
 define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vblendpd_128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
 ; CHECK-NEXT:    retq
   %1 = load <2 x double>, <2 x double>* %b
@@ -80,7 +80,7 @@ declare <2 x double> @llvm.x86.sse41.ble
 
 define <4 x double> @commute_fold_vblendpd_256(<4 x double> %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: commute_fold_vblendpd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],mem[3]
 ; CHECK-NEXT:    retq
   %1 = load <4 x double>, <4 x double>* %b

Modified: llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x i16> @commute_fold_pblendw(<8 x i16> %a, <8 x i16>* %b) #0 {
 ; CHECK-LABEL: commute_fold_pblendw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
 ; CHECK-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %b
@@ -14,7 +14,7 @@ declare <8 x i16> @llvm.x86.sse41.pblend
 
 define <4 x float> @commute_fold_blendps(<4 x float> %a, <4 x float>* %b) #0 {
 ; CHECK-LABEL: commute_fold_blendps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3]
 ; CHECK-NEXT:    retq
   %1 = load <4 x float>, <4 x float>* %b
@@ -25,7 +25,7 @@ declare <4 x float> @llvm.x86.sse41.blen
 
 define <2 x double> @commute_fold_blendpd(<2 x double> %a, <2 x double>* %b) #0 {
 ; CHECK-LABEL: commute_fold_blendpd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
 ; CHECK-NEXT:    retq
   %1 = load <2 x double>, <2 x double>* %b

Modified: llvm/trunk/test/CodeGen/X86/commute-clmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-clmul.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-clmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-clmul.ll Mon Dec  4 09:18:51 2017
@@ -7,12 +7,12 @@ declare <2 x i64> @llvm.x86.pclmulqdq(<2
 
 define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
 ; SSE-LABEL: commute_lq_lq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pclmulqdq $0, (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_lq_lq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpclmulqdq $0, (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = load <2 x i64>, <2 x i64>* %a0
@@ -22,12 +22,12 @@ define <2 x i64> @commute_lq_lq(<2 x i64
 
 define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
 ; SSE-LABEL: commute_lq_hq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pclmulqdq $1, (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_lq_hq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpclmulqdq $1, (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = load <2 x i64>, <2 x i64>* %a0
@@ -37,12 +37,12 @@ define <2 x i64> @commute_lq_hq(<2 x i64
 
 define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
 ; SSE-LABEL: commute_hq_lq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pclmulqdq $16, (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_hq_lq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpclmulqdq $16, (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = load <2 x i64>, <2 x i64>* %a0
@@ -52,12 +52,12 @@ define <2 x i64> @commute_hq_lq(<2 x i64
 
 define <2 x i64> @commute_hq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
 ; SSE-LABEL: commute_hq_hq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pclmulqdq $17, (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_hq_hq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpclmulqdq $17, (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = load <2 x i64>, <2 x i64>* %a0

Modified: llvm/trunk/test/CodeGen/X86/commute-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-fcmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-fcmp.ll Mon Dec  4 09:18:51 2017
@@ -10,17 +10,17 @@
 
 define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_eq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqps (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_eq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_eq:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpeqps (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -33,17 +33,17 @@ define <4 x i32> @commute_cmpps_eq(<4 x
 
 define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_ne:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqps (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_ne:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_ne:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpneqps (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -56,17 +56,17 @@ define <4 x i32> @commute_cmpps_ne(<4 x
 
 define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_ord:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpordps (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_ord:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpordps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_ord:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpordps (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -79,17 +79,17 @@ define <4 x i32> @commute_cmpps_ord(<4 x
 
 define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_uno:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpunordps (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_uno:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpunordps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_uno:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpunordps (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -102,7 +102,7 @@ define <4 x i32> @commute_cmpps_uno(<4 x
 
 define <4 x i32> @commute_cmpps_ueq(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_ueq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    cmpeqps %xmm0, %xmm2
@@ -111,13 +111,13 @@ define <4 x i32> @commute_cmpps_ueq(<4 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_ueq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX-NEXT:    vcmpeq_uqps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_ueq:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpeq_uqps %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -131,7 +131,7 @@ define <4 x i32> @commute_cmpps_ueq(<4 x
 
 define <4 x i32> @commute_cmpps_one(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_one:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    cmpneqps %xmm0, %xmm2
@@ -140,13 +140,13 @@ define <4 x i32> @commute_cmpps_one(<4 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_one:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX-NEXT:    vcmpneq_oqps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_one:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpneq_oqps %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -160,20 +160,20 @@ define <4 x i32> @commute_cmpps_one(<4 x
 
 define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_lt:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm1
 ; SSE-NEXT:    cmpltps %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_lt:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_lt:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpltps %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -187,20 +187,20 @@ define <4 x i32> @commute_cmpps_lt(<4 x
 
 define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_le:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm1
 ; SSE-NEXT:    cmpleps %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_le:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX-NEXT:    vcmpleps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_le:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpleps %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -214,18 +214,18 @@ define <4 x i32> @commute_cmpps_le(<4 x
 
 define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_eq_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqps (%rdi), %xmm0
 ; SSE-NEXT:    cmpeqps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_eq_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_eq_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpeqps (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -238,18 +238,18 @@ define <8 x i32> @commute_cmpps_eq_ymm(<
 
 define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_ne_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqps (%rdi), %xmm0
 ; SSE-NEXT:    cmpneqps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_ne_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_ne_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpneqps (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -262,18 +262,18 @@ define <8 x i32> @commute_cmpps_ne_ymm(<
 
 define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_ord_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpordps (%rdi), %xmm0
 ; SSE-NEXT:    cmpordps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_ord_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpordps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_ord_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpordps (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -286,18 +286,18 @@ define <8 x i32> @commute_cmpps_ord_ymm(
 
 define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_uno_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpunordps (%rdi), %xmm0
 ; SSE-NEXT:    cmpunordps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_uno_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpunordps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_uno_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpunordps (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -310,7 +310,7 @@ define <8 x i32> @commute_cmpps_uno_ymm(
 
 define <8 x i32> @commute_cmpps_ueq_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_ueq_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm2
 ; SSE-NEXT:    movaps 16(%rdi), %xmm3
 ; SSE-NEXT:    movaps %xmm2, %xmm4
@@ -324,13 +324,13 @@ define <8 x i32> @commute_cmpps_ueq_ymm(
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_ueq_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX-NEXT:    vcmpeq_uqps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_ueq_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpeq_uqps %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -344,7 +344,7 @@ define <8 x i32> @commute_cmpps_ueq_ymm(
 
 define <8 x i32> @commute_cmpps_one_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_one_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm2
 ; SSE-NEXT:    movaps 16(%rdi), %xmm3
 ; SSE-NEXT:    movaps %xmm2, %xmm4
@@ -358,13 +358,13 @@ define <8 x i32> @commute_cmpps_one_ymm(
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_one_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX-NEXT:    vcmpneq_oqps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_one_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpneq_oqps %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -378,7 +378,7 @@ define <8 x i32> @commute_cmpps_one_ymm(
 
 define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_lt_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm2
 ; SSE-NEXT:    movaps 16(%rdi), %xmm3
 ; SSE-NEXT:    cmpltps %xmm0, %xmm2
@@ -388,13 +388,13 @@ define <8 x i32> @commute_cmpps_lt_ymm(<
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_lt_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_lt_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpltps %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -408,7 +408,7 @@ define <8 x i32> @commute_cmpps_lt_ymm(<
 
 define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; SSE-LABEL: commute_cmpps_le_ymm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps (%rdi), %xmm2
 ; SSE-NEXT:    movaps 16(%rdi), %xmm3
 ; SSE-NEXT:    cmpleps %xmm0, %xmm2
@@ -418,13 +418,13 @@ define <8 x i32> @commute_cmpps_le_ymm(<
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmpps_le_ymm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX-NEXT:    vcmpleps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmpps_le_ymm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpleps %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -443,17 +443,17 @@ define <8 x i32> @commute_cmpps_le_ymm(<
 
 define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_eq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqpd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_eq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_eq:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpeqpd (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -466,17 +466,17 @@ define <2 x i64> @commute_cmppd_eq(<2 x
 
 define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_ne:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqpd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_ne:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_ne:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpneqpd (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -489,17 +489,17 @@ define <2 x i64> @commute_cmppd_ne(<2 x
 
 define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_ord:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpordpd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_ord:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpordpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_ord:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpordpd (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -512,7 +512,7 @@ define <2 x i64> @commute_cmppd_ord(<2 x
 
 define <2 x i64> @commute_cmppd_ueq(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_ueq:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm2
 ; SSE-NEXT:    cmpeqpd %xmm0, %xmm2
@@ -521,13 +521,13 @@ define <2 x i64> @commute_cmppd_ueq(<2 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_ueq:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX-NEXT:    vcmpeq_uqpd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_ueq:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpeq_uqpd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -541,7 +541,7 @@ define <2 x i64> @commute_cmppd_ueq(<2 x
 
 define <2 x i64> @commute_cmppd_one(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_one:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm2
 ; SSE-NEXT:    cmpneqpd %xmm0, %xmm2
@@ -550,13 +550,13 @@ define <2 x i64> @commute_cmppd_one(<2 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_one:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX-NEXT:    vcmpneq_oqpd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_one:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpneq_oqpd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -570,17 +570,17 @@ define <2 x i64> @commute_cmppd_one(<2 x
 
 define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_uno:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpunordpd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_uno:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpunordpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_uno:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpunordpd (%rdi), %xmm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -593,20 +593,20 @@ define <2 x i64> @commute_cmppd_uno(<2 x
 
 define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_lt:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm1
 ; SSE-NEXT:    cmpltpd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_lt:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_lt:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX512-NEXT:    vcmpltpd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -620,20 +620,20 @@ define <2 x i64> @commute_cmppd_lt(<2 x
 
 define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_le:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm1
 ; SSE-NEXT:    cmplepd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_le:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX-NEXT:    vcmplepd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_le:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX512-NEXT:    vcmplepd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -647,18 +647,18 @@ define <2 x i64> @commute_cmppd_le(<2 x
 
 define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_eq_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqpd (%rdi), %xmm0
 ; SSE-NEXT:    cmpeqpd 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_eq_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_eq_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpeqpd (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -671,18 +671,18 @@ define <4 x i64> @commute_cmppd_eq_ymmm(
 
 define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_ne_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqpd (%rdi), %xmm0
 ; SSE-NEXT:    cmpneqpd 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_ne_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_ne_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpneqpd (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -695,18 +695,18 @@ define <4 x i64> @commute_cmppd_ne_ymmm(
 
 define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_ord_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpordpd (%rdi), %xmm0
 ; SSE-NEXT:    cmpordpd 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_ord_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpordpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_ord_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpordpd (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -719,18 +719,18 @@ define <4 x i64> @commute_cmppd_ord_ymmm
 
 define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_uno_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpunordpd (%rdi), %xmm0
 ; SSE-NEXT:    cmpunordpd 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_uno_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpunordpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_uno_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpunordpd (%rdi), %ymm0, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -743,7 +743,7 @@ define <4 x i64> @commute_cmppd_uno_ymmm
 
 define <4 x i64> @commute_cmppd_ueq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_ueq_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm2
 ; SSE-NEXT:    movapd 16(%rdi), %xmm3
 ; SSE-NEXT:    movapd %xmm2, %xmm4
@@ -757,13 +757,13 @@ define <4 x i64> @commute_cmppd_ueq_ymmm
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_ueq_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX-NEXT:    vcmpeq_uqpd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_ueq_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpeq_uqpd %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -777,7 +777,7 @@ define <4 x i64> @commute_cmppd_ueq_ymmm
 
 define <4 x i64> @commute_cmppd_one_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_one_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm2
 ; SSE-NEXT:    movapd 16(%rdi), %xmm3
 ; SSE-NEXT:    movapd %xmm2, %xmm4
@@ -791,13 +791,13 @@ define <4 x i64> @commute_cmppd_one_ymmm
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_one_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX-NEXT:    vcmpneq_oqpd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_one_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpneq_oqpd %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -811,7 +811,7 @@ define <4 x i64> @commute_cmppd_one_ymmm
 
 define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_lt_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm2
 ; SSE-NEXT:    movapd 16(%rdi), %xmm3
 ; SSE-NEXT:    cmpltpd %xmm0, %xmm2
@@ -821,13 +821,13 @@ define <4 x i64> @commute_cmppd_lt_ymmm(
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_lt_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_lt_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX512-NEXT:    vcmpltpd %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -841,7 +841,7 @@ define <4 x i64> @commute_cmppd_lt_ymmm(
 
 define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; SSE-LABEL: commute_cmppd_le_ymmm:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm2
 ; SSE-NEXT:    movapd 16(%rdi), %xmm3
 ; SSE-NEXT:    cmplepd %xmm0, %xmm2
@@ -851,13 +851,13 @@ define <4 x i64> @commute_cmppd_le_ymmm(
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_cmppd_le_ymmm:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX-NEXT:    vcmplepd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: commute_cmppd_le_ymmm:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX512-NEXT:    vcmplepd %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ declare <4 x i64> @llvm.x86.pclmulqdq.25
 
 define <4 x i64> @commute_v1(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: commute_v1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -18,7 +18,7 @@ define <4 x i64> @commute_v1(<4 x i64> %
 
 define <4 x i64> @commute_v2(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: commute_v2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $16, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -30,7 +30,7 @@ define <4 x i64> @commute_v2(<4 x i64> %
 
 define <4 x i64> @commute_v3(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: commute_v3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vxorps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-vpclmulqdq-avx512.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ declare <8 x i64> @llvm.x86.pclmulqdq.51
 
 define <2 x i64> @commute_xmm_v1(<2 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: commute_xmm_v1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -20,7 +20,7 @@ define <2 x i64> @commute_xmm_v1(<2 x i6
 
 define <2 x i64> @commute_xmm_v2(<2 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: commute_xmm_v2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $16, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -32,7 +32,7 @@ define <2 x i64> @commute_xmm_v2(<2 x i6
 
 define <2 x i64> @commute_xmm_v3(<2 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: commute_xmm_v3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -44,7 +44,7 @@ define <2 x i64> @commute_xmm_v3(<2 x i6
 
 define <4 x i64> @commute_ymm_v1(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: commute_ymm_v1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vpxor %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -56,7 +56,7 @@ define <4 x i64> @commute_ymm_v1(<4 x i6
 
 define <4 x i64> @commute_ymm_v2(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: commute_ymm_v2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $16, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vpxor %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -68,7 +68,7 @@ define <4 x i64> @commute_ymm_v2(<4 x i6
 
 define <4 x i64> @commute_ymm_v3(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: commute_ymm_v3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vpxor %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -80,7 +80,7 @@ define <4 x i64> @commute_ymm_v3(<4 x i6
 
 define <8 x i64> @commute_zmm_v1(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: commute_zmm_v1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $0, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    vpxorq %zmm0, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -92,7 +92,7 @@ define <8 x i64> @commute_zmm_v1(<8 x i6
 
 define <8 x i64> @commute_zmm_v2(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: commute_zmm_v2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $16, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    vpxorq %zmm0, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -104,7 +104,7 @@ define <8 x i64> @commute_zmm_v2(<8 x i6
 
 define <8 x i64> @commute_zmm_v3(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: commute_zmm_v3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $17, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    vpxorq %zmm0, %zmm0, %zmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/commute-xop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/commute-xop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/commute-xop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/commute-xop.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
 ; X32-LABEL: commute_fold_vpcomb:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomgtb (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomb:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomgtb (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a0
@@ -21,13 +21,13 @@ declare <16 x i8> @llvm.x86.xop.vpcomb(<
 
 define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
 ; X32-LABEL: commute_fold_vpcomd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomged (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomged (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -38,13 +38,13 @@ declare <4 x i32> @llvm.x86.xop.vpcomd(<
 
 define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
 ; X32-LABEL: commute_fold_vpcomq:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomltq (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomltq (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <2 x i64>, <2 x i64>* %a0
@@ -55,13 +55,13 @@ declare <2 x i64> @llvm.x86.xop.vpcomq(<
 
 define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
 ; X32-LABEL: commute_fold_vpcomub:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomleub (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomub:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomleub (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a0
@@ -72,13 +72,13 @@ declare <16 x i8> @llvm.x86.xop.vpcomub(
 
 define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
 ; X32-LABEL: commute_fold_vpcomud:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomequd (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomud:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomequd (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -89,13 +89,13 @@ declare <4 x i32> @llvm.x86.xop.vpcomud(
 
 define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
 ; X32-LABEL: commute_fold_vpcomuq:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomnequq (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomuq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomnequq (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <2 x i64>, <2 x i64>* %a0
@@ -106,13 +106,13 @@ declare <2 x i64> @llvm.x86.xop.vpcomuq(
 
 define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
 ; X32-LABEL: commute_fold_vpcomuw:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomfalseuw (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomuw:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomfalseuw (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -123,13 +123,13 @@ declare <8 x i16> @llvm.x86.xop.vpcomuw(
 
 define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
 ; X32-LABEL: commute_fold_vpcomw:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpcomtruew (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpcomw:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcomtruew (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -140,13 +140,13 @@ declare <8 x i16> @llvm.x86.xop.vpcomw(<
 
 define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
 ; X32-LABEL: commute_fold_vpmacsdd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacsdd %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacsdd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -157,13 +157,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacsdd
 
 define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
 ; X32-LABEL: commute_fold_vpmacsdqh:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacsdqh %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacsdqh:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -174,13 +174,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdq
 
 define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
 ; X32-LABEL: commute_fold_vpmacsdql:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacsdql %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacsdql:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -191,13 +191,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdq
 
 define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
 ; X32-LABEL: commute_fold_vpmacssdd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacssdd %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacssdd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -208,13 +208,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacssd
 
 define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
 ; X32-LABEL: commute_fold_vpmacssdqh:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacssdqh %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacssdqh:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -225,13 +225,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacssd
 
 define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
 ; X32-LABEL: commute_fold_vpmacssdql:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacssdql %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacssdql:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a0
@@ -242,13 +242,13 @@ declare <2 x i64> @llvm.x86.xop.vpmacssd
 
 define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
 ; X32-LABEL: commute_fold_vpmacsswd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacsswd %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacsswd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -259,13 +259,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacssw
 
 define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
 ; X32-LABEL: commute_fold_vpmacssww:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacssww %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacssww:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -276,13 +276,13 @@ declare <8 x i16> @llvm.x86.xop.vpmacssw
 
 define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
 ; X32-LABEL: commute_fold_vpmacswd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacswd %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacswd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -293,13 +293,13 @@ declare <4 x i32> @llvm.x86.xop.vpmacswd
 
 define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
 ; X32-LABEL: commute_fold_vpmacsww:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmacsww %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmacsww:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -310,13 +310,13 @@ declare <8 x i16> @llvm.x86.xop.vpmacsww
 
 define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
 ; X32-LABEL: commute_fold_vpmadcsswd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmadcsswd %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmadcsswd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -327,13 +327,13 @@ declare <4 x i32> @llvm.x86.xop.vpmadcss
 
 define <4 x i32> @commute_fold_vpmadcswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
 ; X32-LABEL: commute_fold_vpmadcswd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmadcswd %xmm1, (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: commute_fold_vpmadcswd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0

Modified: llvm/trunk/test/CodeGen/X86/complex-fastmath.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/complex-fastmath.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/complex-fastmath.ll (original)
+++ llvm/trunk/test/CodeGen/X86/complex-fastmath.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@
 
 define <2 x float> @complex_square_f32(<2 x float>) #0 {
 ; SSE-LABEL: complex_square_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm2
@@ -23,7 +23,7 @@ define <2 x float> @complex_square_f32(<
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: complex_square_f32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vaddss %xmm0, %xmm0, %xmm2
 ; AVX1-NEXT:    vmulss %xmm2, %xmm1, %xmm2
@@ -34,7 +34,7 @@ define <2 x float> @complex_square_f32(<
 ; AVX1-NEXT:    retq
 ;
 ; FMA-LABEL: complex_square_f32:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; FMA-NEXT:    vaddss %xmm0, %xmm0, %xmm2
 ; FMA-NEXT:    vmulss %xmm2, %xmm1, %xmm2
@@ -56,7 +56,7 @@ define <2 x float> @complex_square_f32(<
 
 define <2 x double> @complex_square_f64(<2 x double>) #0 {
 ; SSE-LABEL: complex_square_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm1
 ; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT:    movaps %xmm0, %xmm2
@@ -69,7 +69,7 @@ define <2 x double> @complex_square_f64(
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: complex_square_f64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX1-NEXT:    vaddsd %xmm0, %xmm0, %xmm2
 ; AVX1-NEXT:    vmulsd %xmm2, %xmm1, %xmm2
@@ -80,7 +80,7 @@ define <2 x double> @complex_square_f64(
 ; AVX1-NEXT:    retq
 ;
 ; FMA-LABEL: complex_square_f64:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; FMA-NEXT:    vaddsd %xmm0, %xmm0, %xmm2
 ; FMA-NEXT:    vmulsd %xmm2, %xmm1, %xmm2
@@ -106,7 +106,7 @@ define <2 x double> @complex_square_f64(
 
 define <2 x float> @complex_mul_f32(<2 x float>, <2 x float>) #0 {
 ; SSE-LABEL: complex_mul_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE-NEXT:    movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; SSE-NEXT:    movaps %xmm3, %xmm4
@@ -120,7 +120,7 @@ define <2 x float> @complex_mul_f32(<2 x
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: complex_mul_f32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; AVX1-NEXT:    vmulss %xmm0, %xmm3, %xmm4
@@ -133,7 +133,7 @@ define <2 x float> @complex_mul_f32(<2 x
 ; AVX1-NEXT:    retq
 ;
 ; FMA-LABEL: complex_mul_f32:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; FMA-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; FMA-NEXT:    vmulss %xmm2, %xmm1, %xmm4
@@ -159,7 +159,7 @@ define <2 x float> @complex_mul_f32(<2 x
 
 define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
 ; SSE-LABEL: complex_mul_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT:    movaps %xmm1, %xmm3
@@ -175,7 +175,7 @@ define <2 x double> @complex_mul_f64(<2
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: complex_mul_f64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX1-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
 ; AVX1-NEXT:    vmulsd %xmm0, %xmm3, %xmm4
@@ -188,7 +188,7 @@ define <2 x double> @complex_mul_f64(<2
 ; AVX1-NEXT:    retq
 ;
 ; FMA-LABEL: complex_mul_f64:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; FMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
 ; FMA-NEXT:    vmulsd %xmm2, %xmm1, %xmm4

Modified: llvm/trunk/test/CodeGen/X86/compress_expand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compress_expand.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compress_expand.ll Mon Dec  4 09:18:51 2017
@@ -9,14 +9,14 @@ target triple = "x86_64-unknown-linux-gn
 
 define <16 x float> @test1(float* %base) {
 ; SKX-LABEL: test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test1:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
 ; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
@@ -27,14 +27,14 @@ define <16 x float> @test1(float* %base)
 
 define <16 x float> @test2(float* %base, <16 x float> %src0) {
 ; SKX-LABEL: test2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movw $30719, %ax # imm = 0x77FF
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test2:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    movw $30719, %ax # imm = 0x77FF
 ; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
@@ -45,14 +45,14 @@ define <16 x float> @test2(float* %base,
 
 define <8 x double> @test3(double* %base, <8 x double> %src0, <8 x i1> %mask) {
 ; SKX-LABEL: test3:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1
 ; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test3:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpmovsxwq %xmm1, %zmm1
 ; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -64,14 +64,14 @@ define <8 x double> @test3(double* %base
 
 define <4 x float> @test4(float* %base, <4 x float> %src0) {
 ; SKX-LABEL: test4:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb $7, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test4:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    movw $7, %ax
 ; KNL-NEXT:    kmovw %eax, %k1
@@ -84,14 +84,14 @@ define <4 x float> @test4(float* %base,
 
 define <2 x i64> @test5(i64* %base, <2 x i64> %src0) {
 ; SKX-LABEL: test5:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb $2, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vpexpandq (%rdi), %xmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test5:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    movb $2, %al
 ; KNL-NEXT:    kmovw %eax, %k1
@@ -109,7 +109,7 @@ declare <2 x i64>    @llvm.masked.expand
 
 define void @test6(float* %base, <16 x float> %V) {
 ; SKX-LABEL: test6:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
@@ -117,7 +117,7 @@ define void @test6(float* %base, <16 x f
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test6:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
 ; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
@@ -128,7 +128,7 @@ define void @test6(float* %base, <16 x f
 
 define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
 ; SKX-LABEL: test7:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1
 ; SKX-NEXT:    vcompressps %ymm0, (%rdi) {%k1}
@@ -136,7 +136,7 @@ define void @test7(float* %base, <8 x fl
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test7:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpmovsxwq %xmm1, %zmm1
 ; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
@@ -149,7 +149,7 @@ define void @test7(float* %base, <8 x fl
 
 define void @test8(double* %base, <8 x double> %V, <8 x i1> %mask) {
 ; SKX-LABEL: test8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1
 ; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
@@ -157,7 +157,7 @@ define void @test8(double* %base, <8 x d
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test8:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpmovsxwq %xmm1, %zmm1
 ; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -169,7 +169,7 @@ define void @test8(double* %base, <8 x d
 
 define void @test9(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
 ; SKX-LABEL: test9:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1
 ; SKX-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
@@ -177,7 +177,7 @@ define void @test9(i64* %base, <8 x i64>
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test9:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpmovsxwq %xmm1, %zmm1
 ; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
 ; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
@@ -189,7 +189,7 @@ define void @test9(i64* %base, <8 x i64>
 
 define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
 ; SKX-LABEL: test10:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vpcompressq %ymm0, (%rdi) {%k1}
@@ -197,7 +197,7 @@ define void @test10(i64* %base, <4 x i64
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test10:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL-NEXT:    vpsrad $31, %xmm1, %xmm1
@@ -213,14 +213,14 @@ define void @test10(i64* %base, <4 x i64
 
 define void @test11(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
 ; SKX-LABEL: test11:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vpcompressq %xmm0, (%rdi) {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test11:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; KNL-NEXT:    vpsraq $63, %zmm1, %zmm1
@@ -235,14 +235,14 @@ define void @test11(i64* %base, <2 x i64
 
 define void @test12(float* %base, <4 x float> %V, <4 x i1> %mask) {
 ; SKX-LABEL: test12:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test12:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL-NEXT:    vpsrad $31, %xmm1, %xmm1
@@ -257,7 +257,7 @@ define void @test12(float* %base, <4 x f
 
 define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
 ; SKX-LABEL: test13:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm1, %k1
@@ -265,7 +265,7 @@ define <2 x float> @test13(float* %base,
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test13:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -284,7 +284,7 @@ define <2 x float> @test13(float* %base,
 
 define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
 ; SKX-LABEL: test14:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm1, %k1
@@ -292,7 +292,7 @@ define void @test14(float* %base, <2 x f
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test14:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -310,7 +310,7 @@ define void @test14(float* %base, <2 x f
 
 define <32 x float> @test15(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
 ; ALL-LABEL: test15:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; ALL-NEXT:    vpcmpeqd %zmm4, %zmm3, %k1
 ; ALL-NEXT:    vpcmpeqd %zmm4, %zmm2, %k2
@@ -326,7 +326,7 @@ define <32 x float> @test15(float* %base
 
 define <16 x double> @test16(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
 ; SKX-LABEL: test16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; SKX-NEXT:    vpcmpeqd %ymm4, %ymm3, %k1
@@ -338,7 +338,7 @@ define <16 x double> @test16(double* %ba
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test16:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL-NEXT:    vextracti64x4 $1, %zmm2, %ymm4
 ; KNL-NEXT:    vpcmpeqd %zmm3, %zmm4, %k1
@@ -356,7 +356,7 @@ define <16 x double> @test16(double* %ba
 
 define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
 ; SKX-LABEL: test17:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; SKX-NEXT:    vpcmpeqd %zmm4, %zmm3, %k1
 ; SKX-NEXT:    vpcmpeqd %zmm4, %zmm2, %k2
@@ -368,7 +368,7 @@ define void @test17(float* %base, <32 x
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test17:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; KNL-NEXT:    vpcmpeqd %zmm4, %zmm3, %k1
 ; KNL-NEXT:    vpcmpeqd %zmm4, %zmm2, %k2
@@ -384,7 +384,7 @@ define void @test17(float* %base, <32 x
 
 define void @test18(double* %base, <16 x double> %V, <16 x i1> %mask) {
 ; SKX-LABEL: test18:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2
 ; SKX-NEXT:    vpmovb2m %xmm2, %k1
 ; SKX-NEXT:    kshiftrw $8, %k1, %k2
@@ -396,7 +396,7 @@ define void @test18(double* %base, <16 x
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test18:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL-NEXT:    vptestmd %zmm2, %zmm2, %k1

Modified: llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @main() nounwind {
 ; X86-LABEL: main:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    movl $1, (%esp)
 ; X86-NEXT:    movl $1, %eax
@@ -12,7 +12,7 @@ define i32 @main() nounwind {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: main:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl $1, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/conditional-indecrement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/conditional-indecrement.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/conditional-indecrement.ll (original)
+++ llvm/trunk/test/CodeGen/X86/conditional-indecrement.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test1(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    sbbl $-1, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -16,7 +16,7 @@ define i32 @test1(i32 %a, i32 %b) nounwi
 
 define i32 @test1_commute(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test1_commute:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    sbbl $-1, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -29,7 +29,7 @@ define i32 @test1_commute(i32 %a, i32 %b
 
 define i32 @test2(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    adcl $0, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -42,7 +42,7 @@ define i32 @test2(i32 %a, i32 %b) nounwi
 
 define i32 @test3(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    adcl $0, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -55,7 +55,7 @@ define i32 @test3(i32 %a, i32 %b) nounwi
 
 define i32 @test4(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    sbbl $-1, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -68,7 +68,7 @@ define i32 @test4(i32 %a, i32 %b) nounwi
 
 define i32 @test5(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    adcl $-1, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -81,7 +81,7 @@ define i32 @test5(i32 %a, i32 %b) nounwi
 
 define i32 @test6(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    sbbl $0, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -94,7 +94,7 @@ define i32 @test6(i32 %a, i32 %b) nounwi
 
 define i32 @test7(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    sbbl $0, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -107,7 +107,7 @@ define i32 @test7(i32 %a, i32 %b) nounwi
 
 define i32 @test8(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    adcl $-1, %esi
 ; CHECK-NEXT:    movl %esi, %eax

Modified: llvm/trunk/test/CodeGen/X86/conditional-tailcall-samedest.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/conditional-tailcall-samedest.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/conditional-tailcall-samedest.mir (original)
+++ llvm/trunk/test/CodeGen/X86/conditional-tailcall-samedest.mir Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 # CHECK: body:             |
 # CHECK:   bb.0.entry:
-# CHECK:     successors: %bb.1.sw.bb(0x40000000)
+# CHECK:     successors: %bb.1(0x40000000)
 # CHECK:     liveins: %edi
 # CHECK:     CMP32ri8 killed %edi, 2, implicit-def %eflags
 # CHECK:     TCRETURNdi64cc @mergeable_conditional_tailcall
@@ -101,27 +101,27 @@ stack:
 constants:       
 body:             |
   bb.0.entry:
-    successors: %bb.2.sw.bb(0x40000000), %bb.1.entry(0x40000000)
+    successors: %bb.2(0x40000000), %bb.1(0x40000000)
     liveins: %edi
   
     CMP32ri8 killed %edi, 2, implicit-def %eflags
-    JB_1 %bb.2.sw.bb, implicit %eflags
-    JMP_1 %bb.1.entry
+    JB_1 %bb.2, implicit %eflags
+    JMP_1 %bb.1
   
   bb.1.entry:
-    successors: %bb.4.sw.bb2(0x40000000), %bb.5.sw.epilog(0x40000000)
+    successors: %bb.4(0x40000000), %bb.5(0x40000000)
     liveins: %eflags
   
-    JE_1 %bb.4.sw.bb2, implicit killed %eflags
-    JMP_1 %bb.5.sw.epilog
+    JE_1 %bb.4, implicit killed %eflags
+    JMP_1 %bb.5
   
   bb.2.sw.bb:
-    successors: %bb.3.init.check.i(0x00000800), %bb.6.return(0x7ffff800)
+    successors: %bb.3(0x00000800), %bb.6(0x7ffff800)
   
     %al = ACQUIRE_MOV8rm %rip, 1, %noreg, @static_local_guard, %noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
     TEST8rr killed %al, %al, implicit-def %eflags
-    JNE_1 %bb.6.return, implicit killed %eflags
-    JMP_1 %bb.3.init.check.i
+    JNE_1 %bb.6, implicit killed %eflags
+    JMP_1 %bb.3
   
   bb.3.init.check.i:
     dead %edi = MOV32ri64 @static_local_guard, implicit-def %rdi

Modified: llvm/trunk/test/CodeGen/X86/constant-combines.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/constant-combines.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/constant-combines.ll (original)
+++ llvm/trunk/test/CodeGen/X86/constant-combines.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@ define void @PR22524({ float, float }* %
 ; being useful.
 ;
 ; CHECK-LABEL: PR22524:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    xorps %xmm1, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/copysign-constant-magnitude.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/copysign-constant-magnitude.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/copysign-constant-magnitude.ll (original)
+++ llvm/trunk/test/CodeGen/X86/copysign-constant-magnitude.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@ target datalayout = "e-m:o-i64:64-f80:12
 
 define double @mag_pos0_double(double %x) nounwind {
 ; CHECK-LABEL: mag_pos0_double:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andps [[SIGNMASK1]](%rip), %xmm0
 ; CHECK-NEXT:    retq
 ;
@@ -24,7 +24,7 @@ define double @mag_pos0_double(double %x
 
 define double @mag_neg0_double(double %x) nounwind {
 ; CHECK-LABEL: mag_neg0_double:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movsd [[SIGNMASK2]](%rip), %xmm1
 ; CHECK-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
 ; CHECK-NEXT:    andps %xmm1, %xmm0
@@ -42,7 +42,7 @@ define double @mag_neg0_double(double %x
 
 define double @mag_pos1_double(double %x) nounwind {
 ; CHECK-LABEL: mag_pos1_double:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andps [[SIGNMASK3]](%rip), %xmm0
 ; CHECK-NEXT:    movsd [[ONE3]](%rip), %xmm1
 ; CHECK-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
@@ -62,7 +62,7 @@ define double @mag_pos1_double(double %x
 
 define double @mag_neg1_double(double %x) nounwind {
 ; CHECK-LABEL: mag_neg1_double:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andps [[SIGNMASK4]](%rip), %xmm0
 ; CHECK-NEXT:    orps [[ONE4]](%rip), %xmm0
 ; CHECK-NEXT:    retq
@@ -77,7 +77,7 @@ define double @mag_neg1_double(double %x
 
 define float @mag_pos0_float(float %x) nounwind {
 ; CHECK-LABEL: mag_pos0_float:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andps [[SIGNMASK5]](%rip), %xmm0
 ; CHECK-NEXT:    retq
 ;
@@ -90,7 +90,7 @@ define float @mag_pos0_float(float %x) n
 
 define float @mag_neg0_float(float %x) nounwind {
 ; CHECK-LABEL: mag_neg0_float:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movss [[SIGNMASK6]](%rip), %xmm1
 ; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; CHECK-NEXT:    andps %xmm1, %xmm0
@@ -108,7 +108,7 @@ define float @mag_neg0_float(float %x) n
 
 define float @mag_pos1_float(float %x) nounwind {
 ; CHECK-LABEL: mag_pos1_float:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andps [[SIGNMASK7]](%rip), %xmm0
 ; CHECK-NEXT:    movss [[ONE7]](%rip), %xmm1
 ; CHECK-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
@@ -130,7 +130,7 @@ define float @mag_pos1_float(float %x) n
 
 define float @mag_neg1_float(float %x) nounwind {
 ; CHECK-LABEL: mag_neg1_float:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andps [[SIGNMASK8]](%rip), %xmm0
 ; CHECK-NEXT:    orps [[ONE8]](%rip), %xmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/critical-edge-split-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/critical-edge-split-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/critical-edge-split-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/critical-edge-split-2.ll Mon Dec  4 09:18:51 2017
@@ -10,11 +10,11 @@
 ; PR8642
 define i16 @test1(i1 zeroext %C, i8** nocapture %argv) nounwind ssp {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movw $1, %ax
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jne .LBB0_2
-; CHECK-NEXT:  # BB#1: # %cond.false.i
+; CHECK-NEXT:  # %bb.1: # %cond.false.i
 ; CHECK-NEXT:    movl $g_4, %eax
 ; CHECK-NEXT:    movl $g_2+4, %ecx
 ; CHECK-NEXT:    xorl %esi, %esi

Modified: llvm/trunk/test/CodeGen/X86/ctpop-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ctpop-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ctpop-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ctpop-combine.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ declare i64 @llvm.ctpop.i64(i64) nounwin
 
 define i32 @test1(i64 %x) nounwind readnone {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    leaq -1(%rdi), %rcx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testq %rcx, %rdi
@@ -22,7 +22,7 @@ define i32 @test1(i64 %x) nounwind readn
 
 define i32 @test2(i64 %x) nounwind readnone {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    leaq -1(%rdi), %rcx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testq %rcx, %rdi
@@ -36,7 +36,7 @@ define i32 @test2(i64 %x) nounwind readn
 
 define i32 @test3(i64 %x) nounwind readnone {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    popcntq %rdi, %rcx
 ; CHECK-NEXT:    andb $63, %cl
 ; CHECK-NEXT:    xorl %eax, %eax
@@ -52,7 +52,7 @@ define i32 @test3(i64 %x) nounwind readn
 
 define i8 @test4(i8 %x) nounwind readnone {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $127, %edi
 ; CHECK-NEXT:    popcntl %edi, %eax
 ; CHECK-NEXT:    # kill: %al<def> %al<kill> %eax<kill>

Modified: llvm/trunk/test/CodeGen/X86/cvtv2f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cvtv2f32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cvtv2f32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cvtv2f32.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define <2 x float> @uitofp_2i32_buildvector(i32 %x, i32 %y, <2 x float> %v) {
 ; X32-LABEL: uitofp_2i32_buildvector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    movdqa {{.*#+}} xmm2 = [1258291200,1258291200,1258291200,1258291200]
 ; X32-NEXT:    pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
@@ -19,7 +19,7 @@ define <2 x float> @uitofp_2i32_buildvec
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: uitofp_2i32_buildvector:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %edi, %xmm1
 ; X64-NEXT:    pinsrd $1, %esi, %xmm1
 ; X64-NEXT:    movdqa {{.*#+}} xmm2 = [1258291200,1258291200,1258291200,1258291200]
@@ -40,7 +40,7 @@ define <2 x float> @uitofp_2i32_buildvec
 
 define <2 x float> @uitofp_2i32_legalized(<2 x i32> %in, <2 x float> %v) {
 ; X32-LABEL: uitofp_2i32_legalized:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pxor %xmm2, %xmm2
 ; X32-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; X32-NEXT:    movdqa {{.*#+}} xmm0 = [4.503600e+15,4.503600e+15]
@@ -51,7 +51,7 @@ define <2 x float> @uitofp_2i32_legalize
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: uitofp_2i32_legalized:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pxor %xmm2, %xmm2
 ; X64-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; X64-NEXT:    movdqa {{.*#+}} xmm0 = [4.503600e+15,4.503600e+15]

Modified: llvm/trunk/test/CodeGen/X86/dag-fmf-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dag-fmf-cse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dag-fmf-cse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dag-fmf-cse.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define float @fmf_should_not_break_cse(float %a, float %b) {
 ; CHECK-LABEL: fmf_should_not_break_cse:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulss %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/dag-merge-fast-accesses.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dag-merge-fast-accesses.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dag-merge-fast-accesses.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dag-merge-fast-accesses.ll Mon Dec  4 09:18:51 2017
@@ -7,13 +7,13 @@
 
 define void @merge_const_vec_store(i64* %ptr) {
 ; FAST-LABEL: merge_const_vec_store:
-; FAST:       # BB#0:
+; FAST:       # %bb.0:
 ; FAST-NEXT:    xorps %xmm0, %xmm0
 ; FAST-NEXT:    movups %xmm0, (%rdi)
 ; FAST-NEXT:    retq
 ;
 ; SLOW-LABEL: merge_const_vec_store:
-; SLOW:       # BB#0:
+; SLOW:       # %bb.0:
 ; SLOW-NEXT:    movq $0, (%rdi)
 ; SLOW-NEXT:    movq $0, 8(%rdi)
 ; SLOW-NEXT:    retq
@@ -29,12 +29,12 @@ define void @merge_const_vec_store(i64*
 
 define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
 ; FAST-LABEL: merge_vec_element_store:
-; FAST:       # BB#0:
+; FAST:       # %bb.0:
 ; FAST-NEXT:    movups %xmm0, (%rdi)
 ; FAST-NEXT:    retq
 ;
 ; SLOW-LABEL: merge_vec_element_store:
-; SLOW:       # BB#0:
+; SLOW:       # %bb.0:
 ; SLOW-NEXT:    movlpd %xmm0, (%rdi)
 ; SLOW-NEXT:    movhpd %xmm0, 8(%rdi)
 ; SLOW-NEXT:    retq
@@ -53,13 +53,13 @@ define void @merge_vec_element_store(<4
 
 define void @merge_vec_load_and_stores(i64 *%ptr) {
 ; FAST-LABEL: merge_vec_load_and_stores:
-; FAST:       # BB#0:
+; FAST:       # %bb.0:
 ; FAST-NEXT:    movups (%rdi), %xmm0
 ; FAST-NEXT:    movups %xmm0, 40(%rdi)
 ; FAST-NEXT:    retq
 ;
 ; SLOW-LABEL: merge_vec_load_and_stores:
-; SLOW:       # BB#0:
+; SLOW:       # %bb.0:
 ; SLOW-NEXT:    movq (%rdi), %rax
 ; SLOW-NEXT:    movq 8(%rdi), %rcx
 ; SLOW-NEXT:    movq %rax, 40(%rdi)

Modified: llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @test(<2 x double>* %dst, <4 x double> %src) nounwind {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    movaps %xmm0, (%eax)
@@ -19,7 +19,7 @@ entry:
 
 define void @test2(<4 x i16>* %src, <4 x i32>* %dest) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero

Modified: llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @t(i8* %ref_frame_ptr, i32 %ref_frame_stride, i32 %idxX, i32 %idxY) nounwind  {
 ; X32-LABEL: t:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    imull {{[0-9]+}}(%esp), %ecx
@@ -18,7 +18,7 @@ define i32 @t(i8* %ref_frame_ptr, i32 %r
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    ## kill: %edx<def> %edx<kill> %rdx<def>
 ; X64-NEXT:    ## kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    imull %ecx, %esi

Modified: llvm/trunk/test/CodeGen/X86/debugloc-no-line-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/debugloc-no-line-0.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/debugloc-no-line-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/debugloc-no-line-0.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; annotation, and that the annotation is identical to the one on e.g.,
 ; the jmp to bb4.
 ;
-; CHECK: JMP{{.*}}%bb.4.entry, debug-location ![[JUMPLOC:[0-9]+]]
+; CHECK: JMP{{.*}}%bb.4, debug-location ![[JUMPLOC:[0-9]+]]
 ; CHECK: bb.4.entry:
 ; CHECK: successors:
 ; CHECK: JE{{.*}}debug-location ![[JUMPLOC]]

Modified: llvm/trunk/test/CodeGen/X86/div-rem-simplify.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/div-rem-simplify.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/div-rem-simplify.ll (original)
+++ llvm/trunk/test/CodeGen/X86/div-rem-simplify.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define i32 @srem0(i32 %x) {
 ; CHECK-LABEL: srem0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %rem = srem i32 %x, 0
   ret i32 %rem
@@ -13,7 +13,7 @@ define i32 @srem0(i32 %x) {
 
 define i32 @urem0(i32 %x) {
 ; CHECK-LABEL: urem0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %rem = urem i32 %x, 0
   ret i32 %rem
@@ -21,7 +21,7 @@ define i32 @urem0(i32 %x) {
 
 define i32 @sdiv0(i32 %x) {
 ; CHECK-LABEL: sdiv0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %div = sdiv i32 %x, 0
   ret i32 %div
@@ -29,7 +29,7 @@ define i32 @sdiv0(i32 %x) {
 
 define i32 @udiv0(i32 %x) {
 ; CHECK-LABEL: udiv0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %div = udiv i32 %x, 0
   ret i32 %div
@@ -39,7 +39,7 @@ define i32 @udiv0(i32 %x) {
 
 define <4 x i32> @srem_vec0(<4 x i32> %x) {
 ; CHECK-LABEL: srem_vec0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %rem = srem <4 x i32> %x, zeroinitializer
   ret <4 x i32> %rem
@@ -47,7 +47,7 @@ define <4 x i32> @srem_vec0(<4 x i32> %x
 
 define <4 x i32> @urem_vec0(<4 x i32> %x) {
 ; CHECK-LABEL: urem_vec0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %rem = urem <4 x i32> %x, zeroinitializer
   ret <4 x i32> %rem
@@ -55,7 +55,7 @@ define <4 x i32> @urem_vec0(<4 x i32> %x
 
 define <4 x i32> @sdiv_vec0(<4 x i32> %x) {
 ; CHECK-LABEL: sdiv_vec0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %div = sdiv <4 x i32> %x, zeroinitializer
   ret <4 x i32> %div
@@ -63,7 +63,7 @@ define <4 x i32> @sdiv_vec0(<4 x i32> %x
 
 define <4 x i32> @udiv_vec0(<4 x i32> %x) {
 ; CHECK-LABEL: udiv_vec0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %div = udiv <4 x i32> %x, zeroinitializer
   ret <4 x i32> %div
@@ -74,7 +74,7 @@ define <4 x i32> @udiv_vec0(<4 x i32> %x
 
 define i32 @sel_urem0(i1 %cond) {
 ; CHECK-LABEL: sel_urem0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 23, i32 234
   %rem = urem i32 %sel, 0
@@ -83,7 +83,7 @@ define i32 @sel_urem0(i1 %cond) {
 
 define i32 @sel_srem0(i1 %cond) {
 ; CHECK-LABEL: sel_srem0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 23, i32 234
   %rem = srem i32 %sel, 0
@@ -92,7 +92,7 @@ define i32 @sel_srem0(i1 %cond) {
 
 define i32 @sel_udiv0(i1 %cond) {
 ; CHECK-LABEL: sel_udiv0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 23, i32 234
   %div = udiv i32 %sel, 0
@@ -101,7 +101,7 @@ define i32 @sel_udiv0(i1 %cond) {
 
 define i32 @sel_sdiv0(i1 %cond) {
 ; CHECK-LABEL: sel_sdiv0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 23, i32 234
   %div = sdiv i32 %sel, 0
@@ -113,7 +113,7 @@ define i32 @sel_sdiv0(i1 %cond) {
 
 define <4 x i32> @sel_urem0_vec(i1 %cond) {
 ; CHECK-LABEL: sel_urem0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
   %rem = urem <4 x i32> %sel, zeroinitializer
@@ -122,7 +122,7 @@ define <4 x i32> @sel_urem0_vec(i1 %cond
 
 define <4 x i32> @sel_srem0_vec(i1 %cond) {
 ; CHECK-LABEL: sel_srem0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
   %rem = srem <4 x i32> %sel, zeroinitializer
@@ -131,7 +131,7 @@ define <4 x i32> @sel_srem0_vec(i1 %cond
 
 define <4 x i32> @sel_udiv0_vec(i1 %cond) {
 ; CHECK-LABEL: sel_udiv0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
   %div = udiv <4 x i32> %sel, zeroinitializer
@@ -140,7 +140,7 @@ define <4 x i32> @sel_udiv0_vec(i1 %cond
 
 define <4 x i32> @sel_sdiv0_vec(i1 %cond) {
 ; CHECK-LABEL: sel_sdiv0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
   %div = sdiv <4 x i32> %sel, zeroinitializer
@@ -151,7 +151,7 @@ define <4 x i32> @sel_sdiv0_vec(i1 %cond
 
 define <4 x i32> @sdiv0elt_vec(<4 x i32> %x) {
 ; CHECK-LABEL: sdiv0elt_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
   %some_ones = or <4 x i32> %zero, <i32 0, i32 -1, i32 0, i32 3>
@@ -161,7 +161,7 @@ define <4 x i32> @sdiv0elt_vec(<4 x i32>
 
 define <4 x i32> @udiv0elt_vec(<4 x i32> %x) {
 ; CHECK-LABEL: udiv0elt_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %div = udiv <4 x i32> <i32 11, i32 12, i32 13, i32 14>, <i32 0, i32 3, i32 4, i32 0>
   ret <4 x i32> %div
@@ -169,7 +169,7 @@ define <4 x i32> @udiv0elt_vec(<4 x i32>
 
 define <4 x i32> @urem0elt_vec(<4 x i32> %x) {
 ; CHECK-LABEL: urem0elt_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
   %some_ones = or <4 x i32> %zero, <i32 0, i32 0, i32 0, i32 3>
@@ -179,7 +179,7 @@ define <4 x i32> @urem0elt_vec(<4 x i32>
 
 define <4 x i32> @srem0elt_vec(<4 x i32> %x) {
 ; CHECK-LABEL: srem0elt_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %rem = srem <4 x i32> <i32 -11, i32 -12, i32 -13, i32 -14>, <i32 -3, i32 -3, i32 0, i32 2>
   ret <4 x i32> %rem

Modified: llvm/trunk/test/CodeGen/X86/divide-by-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/divide-by-constant.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/divide-by-constant.ll (original)
+++ llvm/trunk/test/CodeGen/X86/divide-by-constant.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define zeroext i16 @test1(i16 zeroext %x) nounwind {
 ; X32-LABEL: test1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    imull $63551, %eax, %eax # imm = 0xF83F
 ; X32-NEXT:    shrl $21, %eax
@@ -12,7 +12,7 @@ define zeroext i16 @test1(i16 zeroext %x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    imull $63551, %edi, %eax # imm = 0xF83F
 ; X64-NEXT:    shrl $21, %eax
 ; X64-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -24,7 +24,7 @@ entry:
 
 define zeroext i16 @test2(i8 signext %x, i16 zeroext %c) nounwind readnone ssp noredzone {
 ; X32-LABEL: test2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    imull $43691, %eax, %eax # imm = 0xAAAB
 ; X32-NEXT:    shrl $17, %eax
@@ -32,7 +32,7 @@ define zeroext i16 @test2(i8 signext %x,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    imull $43691, %esi, %eax # imm = 0xAAAB
 ; X64-NEXT:    shrl $17, %eax
 ; X64-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -45,7 +45,7 @@ entry:
 
 define zeroext i8 @test3(i8 zeroext %x, i8 zeroext %c) nounwind readnone ssp noredzone {
 ; X32-LABEL: test3:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    imull $171, %eax, %eax
 ; X32-NEXT:    shrl $9, %eax
@@ -54,7 +54,7 @@ define zeroext i8 @test3(i8 zeroext %x,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    imull $171, %esi, %eax
 ; X64-NEXT:    shrl $9, %eax
 ; X64-NEXT:    movzwl %ax, %eax
@@ -67,7 +67,7 @@ entry:
 
 define signext i16 @test4(i16 signext %x) nounwind {
 ; X32-LABEL: test4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    imull $1986, %eax, %eax # imm = 0x7C2
 ; X32-NEXT:    movl %eax, %ecx
@@ -78,7 +78,7 @@ define signext i16 @test4(i16 signext %x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    imull $1986, %edi, %eax # imm = 0x7C2
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    shrl $31, %ecx
@@ -93,7 +93,7 @@ entry:
 
 define i32 @test5(i32 %A) nounwind {
 ; X32-LABEL: test5:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl $365384439, %eax # imm = 0x15C752F7
 ; X32-NEXT:    mull {{[0-9]+}}(%esp)
 ; X32-NEXT:    shrl $27, %edx
@@ -101,7 +101,7 @@ define i32 @test5(i32 %A) nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    imulq $365384439, %rax, %rax # imm = 0x15C752F7
 ; X64-NEXT:    shrq $59, %rax
@@ -113,7 +113,7 @@ define i32 @test5(i32 %A) nounwind {
 
 define signext i16 @test6(i16 signext %x) nounwind {
 ; X32-LABEL: test6:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    imull $26215, %eax, %eax # imm = 0x6667
 ; X32-NEXT:    movl %eax, %ecx
@@ -124,7 +124,7 @@ define signext i16 @test6(i16 signext %x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test6:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    imull $26215, %edi, %eax # imm = 0x6667
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    shrl $31, %ecx
@@ -139,7 +139,7 @@ entry:
 
 define i32 @test7(i32 %x) nounwind {
 ; X32-LABEL: test7:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shrl $2, %eax
 ; X32-NEXT:    movl $613566757, %ecx # imm = 0x24924925
@@ -148,7 +148,7 @@ define i32 @test7(i32 %x) nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test7:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    shrl $2, %edi
 ; X64-NEXT:    imulq $613566757, %rdi, %rax # imm = 0x24924925
@@ -162,7 +162,7 @@ define i32 @test7(i32 %x) nounwind {
 ; PR13326
 define i8 @test8(i8 %x) nounwind {
 ; X32-LABEL: test8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    shrb %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -173,7 +173,7 @@ define i8 @test8(i8 %x) nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shrb %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    imull $211, %eax, %eax
@@ -187,7 +187,7 @@ define i8 @test8(i8 %x) nounwind {
 
 define i8 @test9(i8 %x) nounwind {
 ; X32-LABEL: test9:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    shrb $2, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -198,7 +198,7 @@ define i8 @test9(i8 %x) nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test9:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shrb $2, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    imull $71, %eax, %eax
@@ -212,7 +212,7 @@ define i8 @test9(i8 %x) nounwind {
 
 define i32 @testsize1(i32 %x) minsize nounwind {
 ; X32-LABEL: testsize1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pushl $32
 ; X32-NEXT:    popl %ecx
@@ -221,7 +221,7 @@ define i32 @testsize1(i32 %x) minsize no
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: testsize1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq $32
 ; X64-NEXT:    popq %rcx
 ; X64-NEXT:    movl %edi, %eax
@@ -235,7 +235,7 @@ entry:
 
 define i32 @testsize2(i32 %x) minsize nounwind {
 ; X32-LABEL: testsize2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pushl $33
 ; X32-NEXT:    popl %ecx
@@ -244,7 +244,7 @@ define i32 @testsize2(i32 %x) minsize no
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: testsize2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq $33
 ; X64-NEXT:    popq %rcx
 ; X64-NEXT:    movl %edi, %eax
@@ -258,13 +258,13 @@ entry:
 
 define i32 @testsize3(i32 %x) minsize nounwind {
 ; X32-LABEL: testsize3:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shrl $5, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: testsize3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    shrl $5, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -275,7 +275,7 @@ entry:
 
 define i32 @testsize4(i32 %x) minsize nounwind {
 ; X32-LABEL: testsize4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pushl $33
 ; X32-NEXT:    popl %ecx
@@ -284,7 +284,7 @@ define i32 @testsize4(i32 %x) minsize no
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: testsize4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq $33
 ; X64-NEXT:    popq %rcx
 ; X64-NEXT:    xorl %edx, %edx
@@ -298,7 +298,7 @@ entry:
 
 define i64 @PR23590(i64 %x) nounwind {
 ; X32-LABEL: PR23590:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    pushl $0
 ; X32-NEXT:    pushl $12345 # imm = 0x3039
@@ -315,7 +315,7 @@ define i64 @PR23590(i64 %x) nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR23590:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rdi, %rcx
 ; X64-NEXT:    movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
 ; X64-NEXT:    movq %rcx, %rax

Modified: llvm/trunk/test/CodeGen/X86/divrem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/divrem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/divrem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/divrem.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @si64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
 ; X32-LABEL: si64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    pushl %edi
@@ -38,7 +38,7 @@ define void @si64(i64 %x, i64 %y, i64* %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdx, %r8
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    cqto
@@ -55,7 +55,7 @@ define void @si64(i64 %x, i64 %y, i64* %
 
 define void @si32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
 ; X32-LABEL: si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -68,7 +68,7 @@ define void @si32(i32 %x, i32 %y, i32* %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdx, %r8
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cltd
@@ -85,7 +85,7 @@ define void @si32(i32 %x, i32 %y, i32* %
 
 define void @si16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
 ; X32-LABEL: si16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -98,7 +98,7 @@ define void @si16(i16 %x, i16 %y, i16* %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: si16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdx, %r8
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cwtd
@@ -115,7 +115,7 @@ define void @si16(i16 %x, i16 %y, i16* %
 
 define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
 ; X32-LABEL: si8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -129,7 +129,7 @@ define void @si8(i8 %x, i8 %y, i8* %p, i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: si8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cbtw
 ; X64-NEXT:    idivb %sil
@@ -146,7 +146,7 @@ define void @si8(i8 %x, i8 %y, i8* %p, i
 
 define void @ui64(i64 %x, i64 %y, i64* %p, i64* %q) nounwind {
 ; X32-LABEL: ui64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    pushl %edi
@@ -180,7 +180,7 @@ define void @ui64(i64 %x, i64 %y, i64* %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ui64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdx, %r8
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    movq %rdi, %rax
@@ -197,7 +197,7 @@ define void @ui64(i64 %x, i64 %y, i64* %
 
 define void @ui32(i32 %x, i32 %y, i32* %p, i32* %q) nounwind {
 ; X32-LABEL: ui32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -210,7 +210,7 @@ define void @ui32(i32 %x, i32 %y, i32* %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ui32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdx, %r8
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    movl %edi, %eax
@@ -227,7 +227,7 @@ define void @ui32(i32 %x, i32 %y, i32* %
 
 define void @ui16(i16 %x, i16 %y, i16* %p, i16* %q) nounwind {
 ; X32-LABEL: ui16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -240,7 +240,7 @@ define void @ui16(i16 %x, i16 %y, i16* %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ui16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdx, %r8
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    movl %edi, %eax
@@ -257,7 +257,7 @@ define void @ui16(i16 %x, i16 %y, i16* %
 
 define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
 ; X32-LABEL: ui8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -271,7 +271,7 @@ define void @ui8(i8 %x, i8 %y, i8* %p, i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: ui8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil

Modified: llvm/trunk/test/CodeGen/X86/divrem8_ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/divrem8_ext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/divrem8_ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/divrem8_ext.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_udivrem_zext_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X32-NEXT:    divb {{[0-9]+}}(%esp)
@@ -14,7 +14,7 @@ define zeroext i8 @test_udivrem_zext_ah(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_udivrem_zext_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil
@@ -30,7 +30,7 @@ define zeroext i8 @test_udivrem_zext_ah(
 
 define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_urem_zext_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X32-NEXT:    divb {{[0-9]+}}(%esp)
@@ -39,7 +39,7 @@ define zeroext i8 @test_urem_zext_ah(i8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_urem_zext_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil
@@ -52,7 +52,7 @@ define zeroext i8 @test_urem_zext_ah(i8
 
 define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_urem_noext_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
@@ -63,7 +63,7 @@ define i8 @test_urem_noext_ah(i8 %x, i8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_urem_noext_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil
@@ -78,7 +78,7 @@ define i8 @test_urem_noext_ah(i8 %x, i8
 
 define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_urem_zext64_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X32-NEXT:    divb {{[0-9]+}}(%esp)
@@ -87,7 +87,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_urem_zext64_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil
@@ -100,7 +100,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i
 
 define signext i8 @test_sdivrem_sext_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_sdivrem_sext_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    cbtw
 ; X32-NEXT:    idivb {{[0-9]+}}(%esp)
@@ -110,7 +110,7 @@ define signext i8 @test_sdivrem_sext_ah(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sdivrem_sext_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cbtw
 ; X64-NEXT:    idivb %sil
@@ -126,7 +126,7 @@ define signext i8 @test_sdivrem_sext_ah(
 
 define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_srem_sext_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    cbtw
 ; X32-NEXT:    idivb {{[0-9]+}}(%esp)
@@ -135,7 +135,7 @@ define signext i8 @test_srem_sext_ah(i8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_sext_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cbtw
 ; X64-NEXT:    idivb %sil
@@ -148,7 +148,7 @@ define signext i8 @test_srem_sext_ah(i8
 
 define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_srem_noext_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    cbtw
@@ -159,7 +159,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_noext_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cbtw
 ; X64-NEXT:    idivb %sil
@@ -174,7 +174,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8
 
 define i64 @test_srem_sext64_ah(i8 %x, i8 %y) {
 ; X32-LABEL: test_srem_sext64_ah:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    cbtw
 ; X32-NEXT:    idivb {{[0-9]+}}(%esp)
@@ -184,7 +184,7 @@ define i64 @test_srem_sext64_ah(i8 %x, i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_sext64_ah:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    cbtw
 ; X64-NEXT:    idivb %sil
@@ -198,7 +198,7 @@ define i64 @test_srem_sext64_ah(i8 %x, i
 
 define i64 @pr25754(i8 %a, i8 %c) {
 ; X32-LABEL: pr25754:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X32-NEXT:    divb {{[0-9]+}}(%esp)
@@ -209,7 +209,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: pr25754:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil

Modified: llvm/trunk/test/CodeGen/X86/domain-reassignment.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/domain-reassignment.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/domain-reassignment.mir (original)
+++ llvm/trunk/test/CodeGen/X86/domain-reassignment.mir Mon Dec  4 09:18:51 2017
@@ -110,7 +110,7 @@ stack:
 constants:       
 body:             |
   bb.0.entry:
-    successors: %bb.1.if(0x40000000), %bb.2.else(0x40000000)
+    successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: %edi, %rsi, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
   
     %10 = COPY %xmm5
@@ -123,11 +123,11 @@ body:             |
     %3 = COPY %edi
     %11 = COPY %3.sub_8bit
     TEST8ri killed %11, 1, implicit-def %eflags
-    JE_1 %bb.2.else, implicit %eflags
-    JMP_1 %bb.1.if
+    JE_1 %bb.2, implicit %eflags
+    JMP_1 %bb.1
   
   bb.1.if:
-    successors: %bb.3.exit(0x80000000)
+    successors: %bb.3(0x80000000)
   
     %14 = VCMPSSZrr %7, %8, 0
 
@@ -137,10 +137,10 @@ body:             |
     
     %15 = COPY %14
     %0 = COPY %15.sub_8bit
-    JMP_1 %bb.3.exit
+    JMP_1 %bb.3
   
   bb.2.else:
-    successors: %bb.3.exit(0x80000000)
+    successors: %bb.3(0x80000000)
     %12 = VCMPSSZrr %9, %10, 0
 
     ; check that cross domain copies are replaced with same domain copies.
@@ -153,11 +153,11 @@ body:             |
   bb.3.exit:
 
     ; check PHI, IMPLICIT_DEF, and INSERT_SUBREG replacers.
-    ; CHECK: %2:vk8 = PHI %1, %bb.2.else, %0, %bb.1.if
+    ; CHECK: %2:vk8 = PHI %1, %bb.2, %0, %bb.1
     ; CHECK: %16:vk32 = COPY %2
     ; CHECK: %18:vk1wm = COPY %16
   
-    %2 = PHI %1, %bb.2.else, %0, %bb.1.if
+    %2 = PHI %1, %bb.2, %0, %bb.1
     %17 = IMPLICIT_DEF
     %16 = INSERT_SUBREG %17, %2, 1
     %18 = COPY %16

Modified: llvm/trunk/test/CodeGen/X86/exedeps-movq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/exedeps-movq.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/exedeps-movq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/exedeps-movq.ll Mon Dec  4 09:18:51 2017
@@ -12,13 +12,13 @@
 
 define void @store_floats(<4 x float> %x, i64* %p) {
 ; SSE-LABEL: store_floats:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm0, %xmm0
 ; SSE-NEXT:    movlps %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: store_floats:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vmovlps %xmm0, (%rdi)
 ; AVX-NEXT:    retq
@@ -31,13 +31,13 @@ define void @store_floats(<4 x float> %x
 
 define void @store_double(<2 x double> %x, i64* %p) {
 ; SSE-LABEL: store_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm0, %xmm0
 ; SSE-NEXT:    movlpd %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: store_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vmovlpd %xmm0, (%rdi)
 ; AVX-NEXT:    retq
@@ -50,13 +50,13 @@ define void @store_double(<2 x double> %
 
 define void @store_int(<4 x i32> %x, <2 x float>* %p) {
 ; SSE-LABEL: store_int:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm0, %xmm0
 ; SSE-NEXT:    movq %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: store_int:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vmovq %xmm0, (%rdi)
 ; AVX-NEXT:    retq
@@ -69,13 +69,13 @@ define void @store_int(<4 x i32> %x, <2
 
 define void @store_h_double(<2 x double> %x, i64* %p) {
 ; SSE-LABEL: store_h_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm0, %xmm0
 ; SSE-NEXT:    movhpd %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: store_h_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vmovhpd %xmm0, (%rdi)
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/exedepsfix-broadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/exedepsfix-broadcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/exedepsfix-broadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/exedepsfix-broadcast.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <4 x float> @ExeDepsFix_broadcastss(<4 x float> %arg, <4 x float> %arg2) {
 ; CHECK-LABEL: ExeDepsFix_broadcastss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; CHECK-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
@@ -21,7 +21,7 @@ define <4 x float> @ExeDepsFix_broadcast
 
 define <8 x float> @ExeDepsFix_broadcastss256(<8 x float> %arg, <8 x float> %arg2) {
 ; CHECK-LABEL: ExeDepsFix_broadcastss256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastss {{.*}}(%rip), %ymm2
 ; CHECK-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
@@ -36,7 +36,7 @@ define <8 x float> @ExeDepsFix_broadcast
 
 define <4 x float> @ExeDepsFix_broadcastss_inreg(<4 x float> %arg, <4 x float> %arg2, i32 %broadcastvalue) {
 ; CHECK-LABEL: ExeDepsFix_broadcastss_inreg:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovd %edi, %xmm2
 ; CHECK-NEXT:    vpbroadcastd %xmm2, %xmm2
 ; CHECK-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -54,7 +54,7 @@ define <4 x float> @ExeDepsFix_broadcast
 
 define <8 x float> @ExeDepsFix_broadcastss256_inreg(<8 x float> %arg, <8 x float> %arg2, i32 %broadcastvalue) {
 ; CHECK-LABEL: ExeDepsFix_broadcastss256_inreg:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovd %edi, %xmm2
 ; CHECK-NEXT:    vpbroadcastd %xmm2, %ymm2
 ; CHECK-NEXT:    vpand %ymm2, %ymm0, %ymm0
@@ -73,7 +73,7 @@ define <8 x float> @ExeDepsFix_broadcast
 ; In that case the broadcast is directly folded into vandpd.
 define <2 x double> @ExeDepsFix_broadcastsd(<2 x double> %arg, <2 x double> %arg2) {
 ; CHECK-LABEL: ExeDepsFix_broadcastsd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vandpd {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -87,7 +87,7 @@ define <2 x double> @ExeDepsFix_broadcas
 
 define <4 x double> @ExeDepsFix_broadcastsd256(<4 x double> %arg, <4 x double> %arg2) {
 ; CHECK-LABEL: ExeDepsFix_broadcastsd256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm2
 ; CHECK-NEXT:    vandpd %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
@@ -104,7 +104,7 @@ define <4 x double> @ExeDepsFix_broadcas
 ; vpand and there is nothing more you can do to match vmaxpd.
 define <2 x double> @ExeDepsFix_broadcastsd_inreg(<2 x double> %arg, <2 x double> %arg2, i64 %broadcastvalue) {
 ; CHECK-LABEL: ExeDepsFix_broadcastsd_inreg:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %rdi, %xmm2
 ; CHECK-NEXT:    vpbroadcastq %xmm2, %xmm2
 ; CHECK-NEXT:    vpand %xmm2, %xmm0, %xmm0
@@ -122,7 +122,7 @@ define <2 x double> @ExeDepsFix_broadcas
 
 define <4 x double> @ExeDepsFix_broadcastsd256_inreg(<4 x double> %arg, <4 x double> %arg2, i64 %broadcastvalue) {
 ; CHECK-LABEL: ExeDepsFix_broadcastsd256_inreg:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %rdi, %xmm2
 ; CHECK-NEXT:    vpbroadcastq %xmm2, %ymm2
 ; CHECK-NEXT:    vpand %ymm2, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/extract-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-store.ll Mon Dec  4 09:18:51 2017
@@ -10,42 +10,42 @@
 
 define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_i8_0:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    movd %xmm0, %ecx
 ; SSE2-X32-NEXT:    movb %cl, (%eax)
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i8_0:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    movd %xmm0, %eax
 ; SSE2-X64-NEXT:    movb %al, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_i8_0:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    pextrb $0, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_i8_0:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    pextrb $0, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i8_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vpextrb $0, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i8_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vpextrb $0, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i8_0:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    movd %xmm0, %eax
 ; SSE-F128-NEXT:    movb %al, (%rdi)
 ; SSE-F128-NEXT:    retq
@@ -56,7 +56,7 @@ define void @extract_i8_0(i8* nocapture
 
 define void @extract_i8_3(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_i8_3:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    movd %xmm0, %ecx
 ; SSE2-X32-NEXT:    shrl $24, %ecx
@@ -64,36 +64,36 @@ define void @extract_i8_3(i8* nocapture
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i8_3:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    movd %xmm0, %eax
 ; SSE2-X64-NEXT:    shrl $24, %eax
 ; SSE2-X64-NEXT:    movb %al, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_i8_3:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    pextrb $3, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_i8_3:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    pextrb $3, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i8_3:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vpextrb $3, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i8_3:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vpextrb $3, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i8_3:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    movd %xmm0, %eax
 ; SSE-F128-NEXT:    shrl $24, %eax
 ; SSE-F128-NEXT:    movb %al, (%rdi)
@@ -105,42 +105,42 @@ define void @extract_i8_3(i8* nocapture
 
 define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_i8_15:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    pextrw $7, %xmm0, %ecx
 ; SSE2-X32-NEXT:    movb %ch, (%eax)
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i8_15:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    pextrw $7, %xmm0, %eax
 ; SSE2-X64-NEXT:    movb %ah, (%rdi) # NOREX
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_i8_15:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    pextrb $15, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_i8_15:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    pextrb $15, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i8_15:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vpextrb $15, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i8_15:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vpextrb $15, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i8_15:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    pextrw $7, %xmm0, %eax
 ; SSE-F128-NEXT:    movb %ah, (%rdi) # NOREX
 ; SSE-F128-NEXT:    retq
@@ -151,42 +151,42 @@ define void @extract_i8_15(i8* nocapture
 
 define void @extract_i16_0(i16* nocapture %dst, <8 x i16> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_i16_0:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    movd %xmm0, %ecx
 ; SSE2-X32-NEXT:    movw %cx, (%eax)
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i16_0:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    movd %xmm0, %eax
 ; SSE2-X64-NEXT:    movw %ax, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_i16_0:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    pextrw $0, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_i16_0:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    pextrw $0, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i16_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vpextrw $0, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i16_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vpextrw $0, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i16_0:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    movd %xmm0, %eax
 ; SSE-F128-NEXT:    movw %ax, (%rdi)
 ; SSE-F128-NEXT:    retq
@@ -197,42 +197,42 @@ define void @extract_i16_0(i16* nocaptur
 
 define void @extract_i16_7(i16* nocapture %dst, <8 x i16> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_i16_7:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    pextrw $7, %xmm0, %ecx
 ; SSE2-X32-NEXT:    movw %cx, (%eax)
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i16_7:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    pextrw $7, %xmm0, %eax
 ; SSE2-X64-NEXT:    movw %ax, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_i16_7:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    pextrw $7, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_i16_7:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    pextrw $7, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i16_7:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vpextrw $7, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i16_7:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vpextrw $7, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i16_7:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    pextrw $7, %xmm0, %eax
 ; SSE-F128-NEXT:    movw %ax, (%rdi)
 ; SSE-F128-NEXT:    retq
@@ -243,24 +243,24 @@ define void @extract_i16_7(i16* nocaptur
 
 define void @extract_i32_0(i32* nocapture %dst, <4 x i32> %foo) nounwind {
 ; SSE-X32-LABEL: extract_i32_0:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-X32-NEXT:    movss %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE-X64-LABEL: extract_i32_0:
-; SSE-X64:       # BB#0:
+; SSE-X64:       # %bb.0:
 ; SSE-X64-NEXT:    movss %xmm0, (%rdi)
 ; SSE-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i32_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovss %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i32_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vmovss %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
   %vecext = extractelement <4 x i32> %foo, i32 0
@@ -270,42 +270,42 @@ define void @extract_i32_0(i32* nocaptur
 
 define void @extract_i32_3(i32* nocapture %dst, <4 x i32> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_i32_3:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-X32-NEXT:    movd %xmm0, (%eax)
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i32_3:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-X64-NEXT:    movd %xmm0, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_i32_3:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    extractps $3, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_i32_3:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    extractps $3, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i32_3:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vextractps $3, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i32_3:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vextractps $3, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i32_3:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE-F128-NEXT:    movd %xmm0, (%rdi)
 ; SSE-F128-NEXT:    retq
@@ -316,24 +316,24 @@ define void @extract_i32_3(i32* nocaptur
 
 define void @extract_i64_0(i64* nocapture %dst, <2 x i64> %foo) nounwind {
 ; SSE-X32-LABEL: extract_i64_0:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-X32-NEXT:    movlps %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE-X64-LABEL: extract_i64_0:
-; SSE-X64:       # BB#0:
+; SSE-X64:       # %bb.0:
 ; SSE-X64-NEXT:    movlps %xmm0, (%rdi)
 ; SSE-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i64_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovlps %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i64_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vmovlps %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
   %vecext = extractelement <2 x i64> %foo, i32 0
@@ -343,37 +343,37 @@ define void @extract_i64_0(i64* nocaptur
 
 define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
 ; SSE-X32-LABEL: extract_i64_1:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE-X32-NEXT:    movq %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_i64_1:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-X64-NEXT:    movq %xmm0, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X64-LABEL: extract_i64_1:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    pextrq $1, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_i64_1:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX-X32-NEXT:    vmovlps %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_i64_1:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vpextrq $1, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_i64_1:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE-F128-NEXT:    movq %xmm0, (%rdi)
 ; SSE-F128-NEXT:    retq
@@ -384,24 +384,24 @@ define void @extract_i64_1(i64* nocaptur
 
 define void @extract_f32_0(float* nocapture %dst, <4 x float> %foo) nounwind {
 ; SSE-X32-LABEL: extract_f32_0:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-X32-NEXT:    movss %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE-X64-LABEL: extract_f32_0:
-; SSE-X64:       # BB#0:
+; SSE-X64:       # %bb.0:
 ; SSE-X64-NEXT:    movss %xmm0, (%rdi)
 ; SSE-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_f32_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovss %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_f32_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vmovss %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
   %vecext = extractelement <4 x float> %foo, i32 0
@@ -411,42 +411,42 @@ define void @extract_f32_0(float* nocapt
 
 define void @extract_f32_3(float* nocapture %dst, <4 x float> %foo) nounwind {
 ; SSE2-X32-LABEL: extract_f32_3:
-; SSE2-X32:       # BB#0:
+; SSE2-X32:       # %bb.0:
 ; SSE2-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE2-X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-X32-NEXT:    movss %xmm0, (%eax)
 ; SSE2-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_f32_3:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-X64-NEXT:    movss %xmm0, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X32-LABEL: extract_f32_3:
-; SSE41-X32:       # BB#0:
+; SSE41-X32:       # %bb.0:
 ; SSE41-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE41-X32-NEXT:    extractps $3, %xmm0, (%eax)
 ; SSE41-X32-NEXT:    retl
 ;
 ; SSE41-X64-LABEL: extract_f32_3:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    extractps $3, %xmm0, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_f32_3:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vextractps $3, %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_f32_3:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vextractps $3, %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_f32_3:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE-F128-NEXT:    movss %xmm0, (%rdi)
 ; SSE-F128-NEXT:    retq
@@ -457,24 +457,24 @@ define void @extract_f32_3(float* nocapt
 
 define void @extract_f64_0(double* nocapture %dst, <2 x double> %foo) nounwind {
 ; SSE-X32-LABEL: extract_f64_0:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-X32-NEXT:    movlps %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE-X64-LABEL: extract_f64_0:
-; SSE-X64:       # BB#0:
+; SSE-X64:       # %bb.0:
 ; SSE-X64-NEXT:    movlps %xmm0, (%rdi)
 ; SSE-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_f64_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovlps %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_f64_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vmovlps %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
   %vecext = extractelement <2 x double> %foo, i32 0
@@ -484,24 +484,24 @@ define void @extract_f64_0(double* nocap
 
 define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind {
 ; SSE-X32-LABEL: extract_f64_1:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-X32-NEXT:    movhpd %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE-X64-LABEL: extract_f64_1:
-; SSE-X64:       # BB#0:
+; SSE-X64:       # %bb.0:
 ; SSE-X64-NEXT:    movhpd %xmm0, (%rdi)
 ; SSE-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_f64_1:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovhpd %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_f64_1:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vmovhpd %xmm0, (%rdi)
 ; AVX-X64-NEXT:    retq
   %vecext = extractelement <2 x double> %foo, i32 1
@@ -511,7 +511,7 @@ define void @extract_f64_1(double* nocap
 
 define void @extract_f128_0(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
 ; SSE-X32-LABEL: extract_f128_0:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    pushl %edi
 ; SSE-X32-NEXT:    pushl %esi
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -528,32 +528,32 @@ define void @extract_f128_0(fp128* nocap
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_f128_0:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    movq %rdx, 8(%rdi)
 ; SSE2-X64-NEXT:    movq %rsi, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X64-LABEL: extract_f128_0:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    movq %rdx, 8(%rdi)
 ; SSE41-X64-NEXT:    movq %rsi, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_f128_0:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovups %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_f128_0:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    movq %rdx, 8(%rdi)
 ; AVX-X64-NEXT:    movq %rsi, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_f128_0:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    movaps %xmm0, (%rdi)
 ; SSE-F128-NEXT:    retq
   %vecext = extractelement <2 x fp128> %foo, i32 0
@@ -563,7 +563,7 @@ define void @extract_f128_0(fp128* nocap
 
 define void @extract_f128_1(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
 ; SSE-X32-LABEL: extract_f128_1:
-; SSE-X32:       # BB#0:
+; SSE-X32:       # %bb.0:
 ; SSE-X32-NEXT:    pushl %edi
 ; SSE-X32-NEXT:    pushl %esi
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -580,32 +580,32 @@ define void @extract_f128_1(fp128* nocap
 ; SSE-X32-NEXT:    retl
 ;
 ; SSE2-X64-LABEL: extract_f128_1:
-; SSE2-X64:       # BB#0:
+; SSE2-X64:       # %bb.0:
 ; SSE2-X64-NEXT:    movq %r8, 8(%rdi)
 ; SSE2-X64-NEXT:    movq %rcx, (%rdi)
 ; SSE2-X64-NEXT:    retq
 ;
 ; SSE41-X64-LABEL: extract_f128_1:
-; SSE41-X64:       # BB#0:
+; SSE41-X64:       # %bb.0:
 ; SSE41-X64-NEXT:    movq %r8, 8(%rdi)
 ; SSE41-X64-NEXT:    movq %rcx, (%rdi)
 ; SSE41-X64-NEXT:    retq
 ;
 ; AVX-X32-LABEL: extract_f128_1:
-; AVX-X32:       # BB#0:
+; AVX-X32:       # %bb.0:
 ; AVX-X32-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
 ; AVX-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-X32-NEXT:    vmovups %xmm0, (%eax)
 ; AVX-X32-NEXT:    retl
 ;
 ; AVX-X64-LABEL: extract_f128_1:
-; AVX-X64:       # BB#0:
+; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    movq %r8, 8(%rdi)
 ; AVX-X64-NEXT:    movq %rcx, (%rdi)
 ; AVX-X64-NEXT:    retq
 ;
 ; SSE-F128-LABEL: extract_f128_1:
-; SSE-F128:       # BB#0:
+; SSE-F128:       # %bb.0:
 ; SSE-F128-NEXT:    movaps %xmm1, (%rdi)
 ; SSE-F128-NEXT:    retq
   %vecext = extractelement <2 x fp128> %foo, i32 1
@@ -615,11 +615,11 @@ define void @extract_f128_1(fp128* nocap
 
 define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; X32-LABEL: extract_i8_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_i8_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <16 x i8> %foo, i32 16 ; undef
   store i8 %vecext, i8* %dst, align 1
@@ -628,11 +628,11 @@ define void @extract_i8_undef(i8* nocapt
 
 define void @extract_i16_undef(i16* nocapture %dst, <8 x i16> %foo) nounwind {
 ; X32-LABEL: extract_i16_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_i16_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <8 x i16> %foo, i32 9 ; undef
   store i16 %vecext, i16* %dst, align 1
@@ -641,11 +641,11 @@ define void @extract_i16_undef(i16* noca
 
 define void @extract_i32_undef(i32* nocapture %dst, <4 x i32> %foo) nounwind {
 ; X32-LABEL: extract_i32_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_i32_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <4 x i32> %foo, i32 6 ; undef
   store i32 %vecext, i32* %dst, align 1
@@ -654,11 +654,11 @@ define void @extract_i32_undef(i32* noca
 
 define void @extract_i64_undef(i64* nocapture %dst, <2 x i64> %foo) nounwind {
 ; X32-LABEL: extract_i64_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_i64_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <2 x i64> %foo, i32 2 ; undef
   store i64 %vecext, i64* %dst, align 1
@@ -667,11 +667,11 @@ define void @extract_i64_undef(i64* noca
 
 define void @extract_f32_undef(float* nocapture %dst, <4 x float> %foo) nounwind {
 ; X32-LABEL: extract_f32_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_f32_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <4 x float> %foo, i32 6 ; undef
   store float %vecext, float* %dst, align 1
@@ -680,11 +680,11 @@ define void @extract_f32_undef(float* no
 
 define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwind {
 ; X32-LABEL: extract_f64_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_f64_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <2 x double> %foo, i32 2 ; undef
   store double %vecext, double* %dst, align 1
@@ -693,11 +693,11 @@ define void @extract_f64_undef(double* n
 
 define void @extract_f128_undef(fp128* nocapture %dst, <2 x fp128> %foo) nounwind {
 ; X32-LABEL: extract_f128_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: extract_f128_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %vecext = extractelement <2 x fp128> %foo, i32 2 ; undef
   store fp128 %vecext, fp128* %dst, align 1

Modified: llvm/trunk/test/CodeGen/X86/extractelement-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extractelement-index.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extractelement-index.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extractelement-index.ll Mon Dec  4 09:18:51 2017
@@ -10,20 +10,20 @@
 
 define i8 @extractelement_v16i8_1(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: extractelement_v16i8_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    shrl $8, %eax
 ; SSE2-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v16i8_1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrb $1, %xmm0, %eax
 ; SSE41-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i8_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrb $1, %xmm0, %eax
 ; AVX-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; AVX-NEXT:    retq
@@ -33,20 +33,20 @@ define i8 @extractelement_v16i8_1(<16 x
 
 define i8 @extractelement_v16i8_11(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: extractelement_v16i8_11:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pextrw $5, %xmm0, %eax
 ; SSE2-NEXT:    shrl $8, %eax
 ; SSE2-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v16i8_11:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrb $11, %xmm0, %eax
 ; SSE41-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i8_11:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrb $11, %xmm0, %eax
 ; AVX-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; AVX-NEXT:    retq
@@ -56,19 +56,19 @@ define i8 @extractelement_v16i8_11(<16 x
 
 define i8 @extractelement_v16i8_14(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: extractelement_v16i8_14:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pextrw $7, %xmm0, %eax
 ; SSE2-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v16i8_14:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrb $14, %xmm0, %eax
 ; SSE41-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i8_14:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrb $14, %xmm0, %eax
 ; AVX-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; AVX-NEXT:    retq
@@ -78,20 +78,20 @@ define i8 @extractelement_v16i8_14(<16 x
 
 define i8 @extractelement_v32i8_1(<32 x i8> %a) nounwind {
 ; SSE2-LABEL: extractelement_v32i8_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    shrl $8, %eax
 ; SSE2-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v32i8_1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrb $1, %xmm0, %eax
 ; SSE41-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v32i8_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrb $1, %xmm0, %eax
 ; AVX-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; AVX-NEXT:    vzeroupper
@@ -102,20 +102,20 @@ define i8 @extractelement_v32i8_1(<32 x
 
 define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
 ; SSE2-LABEL: extractelement_v32i8_17:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %xmm1, %eax
 ; SSE2-NEXT:    shrl $8, %eax
 ; SSE2-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v32i8_17:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrb $1, %xmm1, %eax
 ; SSE41-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: extractelement_v32i8_17:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpextrb $1, %xmm0, %eax
 ; AVX1-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
@@ -123,7 +123,7 @@ define i8 @extractelement_v32i8_17(<32 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: extractelement_v32i8_17:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
 ; AVX2-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
@@ -135,13 +135,13 @@ define i8 @extractelement_v32i8_17(<32 x
 
 define i16 @extractelement_v8i16_0(<8 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v8i16_0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v8i16_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; AVX-NEXT:    retq
@@ -151,13 +151,13 @@ define i16 @extractelement_v8i16_0(<8 x
 
 define i16 @extractelement_v8i16_3(<8 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v8i16_3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pextrw $3, %xmm0, %eax
 ; SSE-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v8i16_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrw $3, %xmm0, %eax
 ; AVX-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; AVX-NEXT:    retq
@@ -167,13 +167,13 @@ define i16 @extractelement_v8i16_3(<8 x
 
 define i16 @extractelement_v16i16_0(<16 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v16i16_0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i16_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; AVX-NEXT:    vzeroupper
@@ -184,13 +184,13 @@ define i16 @extractelement_v16i16_0(<16
 
 define i16 @extractelement_v16i16_13(<16 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v16i16_13:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pextrw $5, %xmm1, %eax
 ; SSE-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: extractelement_v16i16_13:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpextrw $5, %xmm0, %eax
 ; AVX1-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -198,7 +198,7 @@ define i16 @extractelement_v16i16_13(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: extractelement_v16i16_13:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpextrw $5, %xmm0, %eax
 ; AVX2-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -210,12 +210,12 @@ define i16 @extractelement_v16i16_13(<16
 
 define i32 @extractelement_v4i32_0(<4 x i32> %a) nounwind {
 ; SSE-LABEL: extractelement_v4i32_0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v4i32_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
   %b = extractelement <4 x i32> %a, i256 0
@@ -224,18 +224,18 @@ define i32 @extractelement_v4i32_0(<4 x
 
 define i32 @extractelement_v4i32_3(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: extractelement_v4i32_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v4i32_3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    extractps $3, %xmm0, %eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v4i32_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractps $3, %xmm0, %eax
 ; AVX-NEXT:    retq
   %b = extractelement <4 x i32> %a, i256 3
@@ -244,19 +244,19 @@ define i32 @extractelement_v4i32_3(<4 x
 
 define i32 @extractelement_v8i32_0(<8 x i32> %a) nounwind {
 ; SSE-LABEL: extractelement_v8i32_0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm1, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: extractelement_v8i32_0:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: extractelement_v8i32_0:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
 ; AVX2-NEXT:    vzeroupper
@@ -267,19 +267,19 @@ define i32 @extractelement_v8i32_0(<8 x
 
 define i32 @extractelement_v8i32_4(<8 x i32> %a) nounwind {
 ; SSE-LABEL: extractelement_v8i32_4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm1, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: extractelement_v8i32_4:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: extractelement_v8i32_4:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
 ; AVX2-NEXT:    vzeroupper
@@ -290,18 +290,18 @@ define i32 @extractelement_v8i32_4(<8 x
 
 define i32 @extractelement_v8i32_7(<8 x i32> %a) nounwind {
 ; SSE2-LABEL: extractelement_v8i32_7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v8i32_7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    extractps $3, %xmm1, %eax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v8i32_7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vextractps $3, %xmm0, %eax
 ; AVX-NEXT:    vzeroupper
@@ -312,12 +312,12 @@ define i32 @extractelement_v8i32_7(<8 x
 
 define i64 @extractelement_v2i64_0(<2 x i64> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v2i64_0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movq %xmm0, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v2i64_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq %xmm0, %rax
 ; AVX-NEXT:    retq
   %b = extractelement <2 x i64> %a, i256 0
@@ -326,18 +326,18 @@ define i64 @extractelement_v2i64_0(<2 x
 
 define i64 @extractelement_v2i64_1(<2 x i64> %a, i256 %i) nounwind {
 ; SSE2-LABEL: extractelement_v2i64_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v2i64_1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v2i64_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    retq
   %b = extractelement <2 x i64> %a, i256 1
@@ -346,18 +346,18 @@ define i64 @extractelement_v2i64_1(<2 x
 
 define i64 @extractelement_v4i64_1(<4 x i64> %a, i256 %i) nounwind {
 ; SSE2-LABEL: extractelement_v4i64_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v4i64_1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v4i64_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
@@ -367,25 +367,25 @@ define i64 @extractelement_v4i64_1(<4 x
 
 define i64 @extractelement_v4i64_3(<4 x i64> %a, i256 %i) nounwind {
 ; SSE2-LABEL: extractelement_v4i64_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extractelement_v4i64_3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: extractelement_v4i64_3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: extractelement_v4i64_3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX2-NEXT:    vzeroupper
@@ -400,7 +400,7 @@ define i64 @extractelement_v4i64_3(<4 x
 
 define i8 @extractelement_v16i8_var(<16 x i8> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v16i8_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andl $15, %edi
 ; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
@@ -408,7 +408,7 @@ define i8 @extractelement_v16i8_var(<16
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i8_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    andl $15, %edi
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
@@ -420,7 +420,7 @@ define i8 @extractelement_v16i8_var(<16
 
 define i8 @extractelement_v32i8_var(<32 x i8> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v32i8_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pushq %rbp
 ; SSE-NEXT:    movq %rsp, %rbp
 ; SSE-NEXT:    andq $-32, %rsp
@@ -435,7 +435,7 @@ define i8 @extractelement_v32i8_var(<32
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v32i8_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushq %rbp
 ; AVX-NEXT:    movq %rsp, %rbp
 ; AVX-NEXT:    andq $-32, %rsp
@@ -454,14 +454,14 @@ define i8 @extractelement_v32i8_var(<32
 
 define i16 @extractelement_v8i16_var(<8 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v8i16_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andl $7, %edi
 ; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v8i16_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    andl $7, %edi
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
@@ -472,7 +472,7 @@ define i16 @extractelement_v8i16_var(<8
 
 define i16 @extractelement_v16i16_var(<16 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v16i16_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pushq %rbp
 ; SSE-NEXT:    movq %rsp, %rbp
 ; SSE-NEXT:    andq $-32, %rsp
@@ -486,7 +486,7 @@ define i16 @extractelement_v16i16_var(<1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i16_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushq %rbp
 ; AVX-NEXT:    movq %rsp, %rbp
 ; AVX-NEXT:    andq $-32, %rsp
@@ -504,14 +504,14 @@ define i16 @extractelement_v16i16_var(<1
 
 define i32 @extractelement_v4i32_var(<4 x i32> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v4i32_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andl $3, %edi
 ; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT:    movl -24(%rsp,%rdi,4), %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v4i32_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    andl $3, %edi
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    movl -24(%rsp,%rdi,4), %eax
@@ -522,7 +522,7 @@ define i32 @extractelement_v4i32_var(<4
 
 define i32 @extractelement_v8i32_var(<8 x i32> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v8i32_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pushq %rbp
 ; SSE-NEXT:    movq %rsp, %rbp
 ; SSE-NEXT:    andq $-32, %rsp
@@ -536,7 +536,7 @@ define i32 @extractelement_v8i32_var(<8
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v8i32_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushq %rbp
 ; AVX-NEXT:    movq %rsp, %rbp
 ; AVX-NEXT:    andq $-32, %rsp
@@ -554,14 +554,14 @@ define i32 @extractelement_v8i32_var(<8
 
 define i64 @extractelement_v2i64_var(<2 x i64> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v2i64_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andl $1, %edi
 ; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT:    movq -24(%rsp,%rdi,8), %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v2i64_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    andl $1, %edi
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    movq -24(%rsp,%rdi,8), %rax
@@ -572,7 +572,7 @@ define i64 @extractelement_v2i64_var(<2
 
 define i64 @extractelement_v4i64_var(<4 x i64> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v4i64_var:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pushq %rbp
 ; SSE-NEXT:    movq %rsp, %rbp
 ; SSE-NEXT:    andq $-32, %rsp
@@ -586,7 +586,7 @@ define i64 @extractelement_v4i64_var(<4
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v4i64_var:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushq %rbp
 ; AVX-NEXT:    movq %rsp, %rbp
 ; AVX-NEXT:    andq $-32, %rsp
@@ -608,11 +608,11 @@ define i64 @extractelement_v4i64_var(<4
 
 define i8 @extractelement_32i8_m1(<32 x i8> %a) nounwind {
 ; SSE-LABEL: extractelement_32i8_m1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_32i8_m1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %b = extractelement <32 x i8> %a, i256 -1
   ret i8 %b
@@ -620,11 +620,11 @@ define i8 @extractelement_32i8_m1(<32 x
 
 define i16 @extractelement_v16i16_m4(<16 x i16> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v16i16_m4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v16i16_m4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %b = extractelement <16 x i16> %a, i256 -4
   ret i16 %b
@@ -632,11 +632,11 @@ define i16 @extractelement_v16i16_m4(<16
 
 define i32 @extractelement_v8i32_15(<8 x i32> %a) nounwind {
 ; SSE-LABEL: extractelement_v8i32_15:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v8i32_15:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %b = extractelement <8 x i32> %a, i64 15
   ret i32 %b
@@ -644,11 +644,11 @@ define i32 @extractelement_v8i32_15(<8 x
 
 define i64 @extractelement_v4i64_4(<4 x i64> %a, i256 %i) nounwind {
 ; SSE-LABEL: extractelement_v4i64_4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extractelement_v4i64_4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    retq
   %b = extractelement <4 x i64> %a, i256 4
   ret i64 %b

Modified: llvm/trunk/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extractelement-legalization-store-ordering.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extractelement-legalization-store-ordering.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extractelement-legalization-store-ordering.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@ target datalayout = "e-m:o-p:32:32-f64:3
 
 define void @test_extractelement_legalization_storereuse(<4 x i32> %a, i32* nocapture %x, i32* nocapture readonly %y, i32 %i) #0 {
 ; CHECK-LABEL: test_extractelement_legalization_storereuse:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    pushl %ebx
 ; CHECK-NEXT:    pushl %edi
 ; CHECK-NEXT:    pushl %esi

Modified: llvm/trunk/test/CodeGen/X86/extractelement-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extractelement-load.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extractelement-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extractelement-load.ll Mon Dec  4 09:18:51 2017
@@ -7,18 +7,18 @@ target datalayout = "e-m:e-i64:64-f80:12
 
 define i32 @t(<2 x i64>* %val) nounwind  {
 ; X32-SSE2-LABEL: t:
-; X32-SSE2:       # BB#0:
+; X32-SSE2:       # %bb.0:
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE2-NEXT:    movl 8(%eax), %eax
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-SSSE3-LABEL: t:
-; X64-SSSE3:       # BB#0:
+; X64-SSSE3:       # %bb.0:
 ; X64-SSSE3-NEXT:    movl 8(%rdi), %eax
 ; X64-SSSE3-NEXT:    retq
 ;
 ; X64-AVX-LABEL: t:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    movl 8(%rdi), %eax
 ; X64-AVX-NEXT:    retq
   %tmp2 = load <2 x i64>, <2 x i64>* %val, align 16		; <<2 x i64>> [#uses=1]
@@ -31,15 +31,15 @@ define i32 @t(<2 x i64>* %val) nounwind
 ; (Making sure this doesn't crash.)
 define i32 @t2(<8 x i32>* %xp) {
 ; X32-SSE2-LABEL: t2:
-; X32-SSE2:       # BB#0:
+; X32-SSE2:       # %bb.0:
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-SSSE3-LABEL: t2:
-; X64-SSSE3:       # BB#0:
+; X64-SSSE3:       # %bb.0:
 ; X64-SSSE3-NEXT:    retq
 ;
 ; X64-AVX-LABEL: t2:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    retq
   %x = load <8 x i32>, <8 x i32>* %xp
   %Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
@@ -57,17 +57,17 @@ define i32 @t2(<8 x i32>* %xp) {
 
 define void @t3() {
 ; X32-SSE2-LABEL: t3:
-; X32-SSE2:       # BB#0: # %bb
+; X32-SSE2:       # %bb.0: # %bb
 ; X32-SSE2-NEXT:    movupd (%eax), %xmm0
 ; X32-SSE2-NEXT:    movhpd %xmm0, (%eax)
 ;
 ; X64-SSSE3-LABEL: t3:
-; X64-SSSE3:       # BB#0: # %bb
+; X64-SSSE3:       # %bb.0: # %bb
 ; X64-SSSE3-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
 ; X64-SSSE3-NEXT:    movlpd %xmm0, (%rax)
 ;
 ; X64-AVX-LABEL: t3:
-; X64-AVX:       # BB#0: # %bb
+; X64-AVX:       # %bb.0: # %bb
 ; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X64-AVX-NEXT:    vmovlpd %xmm0, (%rax)
 bb:
@@ -83,7 +83,7 @@ bb:
 ; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
 define i64 @t4(<2 x double>* %a) {
 ; X32-SSE2-LABEL: t4:
-; X32-SSE2:       # BB#0:
+; X32-SSE2:       # %bb.0:
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE2-NEXT:    movapd (%eax), %xmm0
 ; X32-SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -94,12 +94,12 @@ define i64 @t4(<2 x double>* %a) {
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-SSSE3-LABEL: t4:
-; X64-SSSE3:       # BB#0:
+; X64-SSSE3:       # %bb.0:
 ; X64-SSSE3-NEXT:    movq (%rdi), %rax
 ; X64-SSSE3-NEXT:    retq
 ;
 ; X64-AVX-LABEL: t4:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    movq (%rdi), %rax
 ; X64-AVX-NEXT:    retq
   %b = load <2 x double>, <2 x double>* %a, align 16

Modified: llvm/trunk/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define float @test_cvtsh_ss(i16 %a0) nounwind {
 ; X32-LABEL: test_cvtsh_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovd %eax, %xmm0
@@ -17,7 +17,7 @@ define float @test_cvtsh_ss(i16 %a0) nou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_cvtsh_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl %di, %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
 ; X64-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -37,7 +37,7 @@ define float @test_cvtsh_ss(i16 %a0) nou
 
 define i16 @test_cvtss_sh(float %a0) nounwind {
 ; X32-LABEL: test_cvtss_sh:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -47,7 +47,7 @@ define i16 @test_cvtss_sh(float %a0) nou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_cvtss_sh:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X64-NEXT:    vcvtps2ph $0, %xmm0, %xmm0
@@ -65,12 +65,12 @@ define i16 @test_cvtss_sh(float %a0) nou
 
 define <4 x float> @test_mm_cvtph_ps(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtph_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtph_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -80,12 +80,12 @@ define <4 x float> @test_mm_cvtph_ps(<2
 
 define <8 x float> @test_mm256_cvtph_ps(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtph_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtph2ps %xmm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtph_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps %xmm0, %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -95,12 +95,12 @@ define <8 x float> @test_mm256_cvtph_ps(
 
 define <2 x i64> @test_mm_cvtps_ph(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtps_ph:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtps2ph $0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtps_ph:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtps2ph $0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %cvt = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
@@ -110,13 +110,13 @@ define <2 x i64> @test_mm_cvtps_ph(<4 x
 
 define <2 x i64> @test_mm256_cvtps_ph(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtps_ph:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtps2ph $0, %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtps_ph:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtps2ph $0, %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -6,22 +6,22 @@
 
 define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
 ; X32-LABEL: test_x86_vcvtph2ps_128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtph2ps %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtph2ps_128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_128:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_128:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
@@ -31,24 +31,24 @@ declare <4 x float> @llvm.x86.vcvtph2ps.
 
 define <4 x float> @test_x86_vcvtph2ps_128_m(<8 x i16>* nocapture %a) {
 ; X32-LABEL: test_x86_vcvtph2ps_128_m:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtph2ps_128_m:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_128_m:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_128_m:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %load = load <8 x i16>, <8 x i16>* %a
@@ -58,22 +58,22 @@ define <4 x float> @test_x86_vcvtph2ps_1
 
 define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
 ; X32-LABEL: test_x86_vcvtph2ps_256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtph2ps %xmm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtph2ps_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps %xmm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_256:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    vcvtph2ps %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_256:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtph2ps %xmm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
@@ -83,24 +83,24 @@ declare <8 x float> @llvm.x86.vcvtph2ps.
 
 define <8 x float> @test_x86_vcvtph2ps_256_m(<8 x i16>* nocapture %a) nounwind {
 ; X32-LABEL: test_x86_vcvtph2ps_256_m:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtph2ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0x00]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtph2ps_256_m:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0x13,0x07]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtph2ps_256_m:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtph2ps (%eax), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0x00]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtph2ps_256_m:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtph2ps (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x13,0x07]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %load = load <8 x i16>, <8 x i16>* %a
@@ -110,22 +110,22 @@ define <8 x float> @test_x86_vcvtph2ps_2
 
 define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
 ; X32-LABEL: test_x86_vcvtps2ph_128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x00]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
@@ -135,25 +135,25 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.12
 
 define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
 ; X32-LABEL: test_x86_vcvtps2ph_256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
 ; X32-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_256:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
 ; X32-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0xc0,0x00]
 ; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
@@ -164,24 +164,24 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.25
 
 define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
 ; X32-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %load = load i64, i64* %ptr
@@ -194,24 +194,24 @@ define <4 x float> @test_x86_vcvtps2ph_1
 
 define <4 x float> @test_x86_vcvtps2ph_128_scalar2(i64* %ptr) {
 ; X32-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x00]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0x13,0x07]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X32-AVX512VL:       # BB#0:
+; X32-AVX512VL:       # %bb.0:
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtph2ps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x00]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_scalar2:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vcvtph2ps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0x07]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
   %load = load i64, i64* %ptr
@@ -223,27 +223,27 @@ define <4 x float> @test_x86_vcvtps2ph_1
 
 define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind {
 ; X32-LABEL: test_x86_vcvtps2ph_256_m:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtps2ph $3, %ymm0, (%eax) # encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
 ; X32-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_256_m:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vcvtps2ph $3, %ymm0, (%rdi) # encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
-; X32-AVX512VL:       # BB#0: # %entry
+; X32-AVX512VL:       # %bb.0: # %entry
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtps2ph $3, %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x00,0x03]
 ; X32-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_256_m:
-; X64-AVX512VL:       # BB#0: # %entry
+; X64-AVX512VL:       # %bb.0: # %entry
 ; X64-AVX512VL-NEXT:    vcvtps2ph $3, %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x1d,0x07,0x03]
 ; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
@@ -255,18 +255,18 @@ entry:
 
 define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) nounwind {
 ; X32-LABEL: test_x86_vcvtps2ph_128_m:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128_m:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
-; X32-AVX512VL:       # BB#0: # %entry
+; X32-AVX512VL:       # %bb.0: # %entry
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x03]
 ; X32-AVX512VL-NEXT:    vpmovzxwd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
@@ -275,7 +275,7 @@ define void @test_x86_vcvtps2ph_128_m(<4
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m:
-; X64-AVX512VL:       # BB#0: # %entry
+; X64-AVX512VL:       # %bb.0: # %entry
 ; X64-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0xc0,0x03]
 ; X64-AVX512VL-NEXT:    vpmovzxwd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x33,0xc0]
 ; X64-AVX512VL-NEXT:    # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -290,24 +290,24 @@ entry:
 
 define void @test_x86_vcvtps2ph_128_m2(double* nocapture %hf4x16, <4 x float> %f4x32) #0 {
 ; X32-LABEL: test_x86_vcvtps2ph_128_m2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128_m2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
-; X32-AVX512VL:       # BB#0: # %entry
+; X32-AVX512VL:       # %bb.0: # %entry
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m2:
-; X64-AVX512VL:       # BB#0: # %entry
+; X64-AVX512VL:       # %bb.0: # %entry
 ; X64-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
 entry:
@@ -320,24 +320,24 @@ entry:
 
 define void @test_x86_vcvtps2ph_128_m3(i64* nocapture %hf4x16, <4 x float> %f4x32) #0 {
 ; X32-LABEL: test_x86_vcvtps2ph_128_m3:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
 ; X32-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_vcvtps2ph_128_m3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
 ; X64-NEXT:    retq # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
-; X32-AVX512VL:       # BB#0: # %entry
+; X32-AVX512VL:       # %bb.0: # %entry
 ; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X32-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x00,0x03]
 ; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_vcvtps2ph_128_m3:
-; X64-AVX512VL:       # BB#0: # %entry
+; X64-AVX512VL:       # %bb.0: # %entry
 ; X64-AVX512VL-NEXT:    vcvtps2ph $3, %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x1d,0x07,0x03]
 ; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
 entry:

Modified: llvm/trunk/test/CodeGen/X86/f16c-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/f16c-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/f16c-schedule.ll Mon Dec  4 09:18:51 2017
@@ -9,49 +9,49 @@
 
 define <4 x float> @test_vcvtph2ps_128(<8 x i16> %a0, <8 x i16> *%a1) {
 ; GENERIC-LABEL: test_vcvtph2ps_128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [7:1.00]
 ; GENERIC-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; IVY-LABEL: test_vcvtph2ps_128:
-; IVY:       # BB#0:
+; IVY:       # %bb.0:
 ; IVY-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [7:1.00]
 ; IVY-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
 ; IVY-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; IVY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vcvtph2ps_128:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [1:1.00]
 ; HASWELL-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [2:1.00]
 ; HASWELL-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vcvtph2ps_128:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [2:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vcvtph2ps_128:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_vcvtph2ps_128:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [8:1.00]
 ; BTVER2-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_vcvtph2ps_128:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [100:?]
 ; ZNVER1-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
@@ -66,49 +66,49 @@ declare <4 x float> @llvm.x86.vcvtph2ps.
 
 define <8 x float> @test_vcvtph2ps_256(<8 x i16> %a0, <8 x i16> *%a1) {
 ; GENERIC-LABEL: test_vcvtph2ps_256:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [7:1.00]
 ; GENERIC-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; IVY-LABEL: test_vcvtph2ps_256:
-; IVY:       # BB#0:
+; IVY:       # %bb.0:
 ; IVY-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [7:1.00]
 ; IVY-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [3:1.00]
 ; IVY-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; IVY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vcvtph2ps_256:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [1:1.00]
 ; HASWELL-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [2:1.00]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vcvtph2ps_256:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [2:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vcvtph2ps_256:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_vcvtph2ps_256:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_vcvtph2ps_256:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [100:?]
 ; ZNVER1-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -123,43 +123,43 @@ declare <8 x float> @llvm.x86.vcvtph2ps.
 
 define <8 x i16> @test_vcvtps2ph_128(<4 x float> %a0, <4 x float> %a1, <4 x i16> *%a2) {
 ; GENERIC-LABEL: test_vcvtps2ph_128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; IVY-LABEL: test_vcvtps2ph_128:
-; IVY:       # BB#0:
+; IVY:       # %bb.0:
 ; IVY-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
 ; IVY-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [7:1.00]
 ; IVY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vcvtps2ph_128:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vcvtps2ph_128:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [4:1.00]
 ; BROADWELL-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vcvtps2ph_128:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKYLAKE-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [6:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_vcvtps2ph_128:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_vcvtps2ph_128:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -173,48 +173,48 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.12
 
 define <8 x i16> @test_vcvtps2ph_256(<8 x float> %a0, <8 x float> %a1, <8 x i16> *%a2) {
 ; GENERIC-LABEL: test_vcvtps2ph_256:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [7:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; IVY-LABEL: test_vcvtps2ph_256:
-; IVY:       # BB#0:
+; IVY:       # %bb.0:
 ; IVY-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
 ; IVY-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [7:1.00]
 ; IVY-NEXT:    vzeroupper
 ; IVY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vcvtps2ph_256:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [6:1.00]
 ; HASWELL-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [6:1.00]
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vcvtps2ph_256:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [4:1.00]
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vcvtps2ph_256:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [8:1.00]
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_vcvtps2ph_256:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [6:2.00]
 ; BTVER2-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [11:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_vcvtps2ph_256:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:?]
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]

Modified: llvm/trunk/test/CodeGen/X86/fadd-combines.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fadd-combines.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fadd-combines.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fadd-combines.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define float @fadd_zero_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_zero_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %y = fadd float %x, 0.0
   ret float %y
@@ -11,7 +11,7 @@ define float @fadd_zero_f32(float %x) #0
 
 define <4 x float> @fadd_zero_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_zero_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, zeroinitializer
   ret <4 x float> %y
@@ -20,7 +20,7 @@ define <4 x float> @fadd_zero_4f32(<4 x
 ; CHECK: float 3
 define float @fadd_2const_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_2const_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd float %x, 1.0
@@ -34,7 +34,7 @@ define float @fadd_2const_f32(float %x)
 ; CHECK: float 5
 define <4 x float> @fadd_2const_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_2const_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -45,7 +45,7 @@ define <4 x float> @fadd_2const_4f32(<4
 ; CHECK: float 3
 define float @fadd_x_fmul_x_c_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_x_fmul_x_c_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fmul float %x, 2.0
@@ -59,7 +59,7 @@ define float @fadd_x_fmul_x_c_f32(float
 ; CHECK: float 5
 define <4 x float> @fadd_x_fmul_x_c_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_x_fmul_x_c_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -70,7 +70,7 @@ define <4 x float> @fadd_x_fmul_x_c_4f32
 ; CHECK: float 3
 define float @fadd_fmul_x_c_x_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_fmul_x_c_x_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fmul float %x, 2.0
@@ -84,7 +84,7 @@ define float @fadd_fmul_x_c_x_f32(float
 ; CHECK: float 5
 define <4 x float> @fadd_fmul_x_c_x_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_fmul_x_c_x_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -95,7 +95,7 @@ define <4 x float> @fadd_fmul_x_c_x_4f32
 ; CHECK: float 4
 define float @fadd_fadd_x_x_fmul_x_c_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_fadd_x_x_fmul_x_c_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd float %x, %x
@@ -110,7 +110,7 @@ define float @fadd_fadd_x_x_fmul_x_c_f32
 ; CHECK: float 6
 define <4 x float> @fadd_fadd_x_x_fmul_x_c_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_fadd_x_x_fmul_x_c_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, %x
@@ -122,7 +122,7 @@ define <4 x float> @fadd_fadd_x_x_fmul_x
 ; CHECK: float 4
 define float @fadd_fmul_x_c_fadd_x_x_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_fmul_x_c_fadd_x_x_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd float %x, %x
@@ -137,7 +137,7 @@ define float @fadd_fmul_x_c_fadd_x_x_f32
 ; CHECK: float 6
 define <4 x float> @fadd_fmul_x_c_fadd_x_x_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_fmul_x_c_fadd_x_x_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, %x
@@ -149,7 +149,7 @@ define <4 x float> @fadd_fmul_x_c_fadd_x
 ; CHECK: float 3
 define float @fadd_x_fadd_x_x_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_x_fadd_x_x_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd float %x, %x
@@ -163,7 +163,7 @@ define float @fadd_x_fadd_x_x_f32(float
 ; CHECK: float 3
 define <4 x float> @fadd_x_fadd_x_x_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_x_fadd_x_x_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, %x
@@ -174,7 +174,7 @@ define <4 x float> @fadd_x_fadd_x_x_4f32
 ; CHECK: float 3
 define float @fadd_fadd_x_x_x_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_fadd_x_x_x_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd float %x, %x
@@ -188,7 +188,7 @@ define float @fadd_fadd_x_x_x_f32(float
 ; CHECK: float 3
 define <4 x float> @fadd_fadd_x_x_x_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_fadd_x_x_x_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, %x
@@ -199,7 +199,7 @@ define <4 x float> @fadd_fadd_x_x_x_4f32
 ; CHECK: float 4
 define float @fadd_fadd_x_x_fadd_x_x_f32(float %x) #0 {
 ; CHECK-LABEL: fadd_fadd_x_x_fadd_x_x_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd float %x, %x
@@ -213,7 +213,7 @@ define float @fadd_fadd_x_x_fadd_x_x_f32
 ; CHECK: float 4
 define <4 x float> @fadd_fadd_x_x_fadd_x_x_4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fadd_fadd_x_x_fadd_x_x_4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, %x

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-cmp.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define zeroext i1 @fcmp_oeq(float %x, float %y) {
 ; SDAG-LABEL: fcmp_oeq:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpeqss %xmm1, %xmm0
 ; SDAG-NEXT:    movd %xmm0, %eax
 ; SDAG-NEXT:    andl $1, %eax
@@ -14,7 +14,7 @@ define zeroext i1 @fcmp_oeq(float %x, fl
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oeq:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    sete %al
 ; FAST_NOAVX-NEXT:    setnp %cl
@@ -24,7 +24,7 @@ define zeroext i1 @fcmp_oeq(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oeq:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    sete %al
 ; FAST_AVX-NEXT:    setnp %cl
@@ -38,13 +38,13 @@ define zeroext i1 @fcmp_oeq(float %x, fl
 
 define zeroext i1 @fcmp_ogt(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ogt:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    seta %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ogt:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    seta %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -52,7 +52,7 @@ define zeroext i1 @fcmp_ogt(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ogt:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    seta %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -64,13 +64,13 @@ define zeroext i1 @fcmp_ogt(float %x, fl
 
 define zeroext i1 @fcmp_oge(float %x, float %y) {
 ; SDAG-LABEL: fcmp_oge:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setae %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oge:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setae %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -78,7 +78,7 @@ define zeroext i1 @fcmp_oge(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oge:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setae %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -90,13 +90,13 @@ define zeroext i1 @fcmp_oge(float %x, fl
 
 define zeroext i1 @fcmp_olt(float %x, float %y) {
 ; SDAG-LABEL: fcmp_olt:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    seta %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_olt:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    seta %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -104,7 +104,7 @@ define zeroext i1 @fcmp_olt(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_olt:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    seta %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -116,13 +116,13 @@ define zeroext i1 @fcmp_olt(float %x, fl
 
 define zeroext i1 @fcmp_ole(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ole:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    setae %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ole:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setae %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -130,7 +130,7 @@ define zeroext i1 @fcmp_ole(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ole:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setae %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -142,13 +142,13 @@ define zeroext i1 @fcmp_ole(float %x, fl
 
 define zeroext i1 @fcmp_one(float %x, float %y) {
 ; SDAG-LABEL: fcmp_one:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setne %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_one:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setne %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -156,7 +156,7 @@ define zeroext i1 @fcmp_one(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_one:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setne %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -168,13 +168,13 @@ define zeroext i1 @fcmp_one(float %x, fl
 
 define zeroext i1 @fcmp_ord(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ord:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setnp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ord:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -182,7 +182,7 @@ define zeroext i1 @fcmp_ord(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ord:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -194,13 +194,13 @@ define zeroext i1 @fcmp_ord(float %x, fl
 
 define zeroext i1 @fcmp_uno(float %x, float %y) {
 ; SDAG-LABEL: fcmp_uno:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_uno:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -208,7 +208,7 @@ define zeroext i1 @fcmp_uno(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uno:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -220,13 +220,13 @@ define zeroext i1 @fcmp_uno(float %x, fl
 
 define zeroext i1 @fcmp_ueq(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ueq:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    sete %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ueq:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    sete %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -234,7 +234,7 @@ define zeroext i1 @fcmp_ueq(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ueq:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    sete %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -246,13 +246,13 @@ define zeroext i1 @fcmp_ueq(float %x, fl
 
 define zeroext i1 @fcmp_ugt(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ugt:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    setb %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ugt:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setb %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -260,7 +260,7 @@ define zeroext i1 @fcmp_ugt(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ugt:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setb %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -272,13 +272,13 @@ define zeroext i1 @fcmp_ugt(float %x, fl
 
 define zeroext i1 @fcmp_uge(float %x, float %y) {
 ; SDAG-LABEL: fcmp_uge:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    setbe %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_uge:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setbe %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -286,7 +286,7 @@ define zeroext i1 @fcmp_uge(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uge:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setbe %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -298,13 +298,13 @@ define zeroext i1 @fcmp_uge(float %x, fl
 
 define zeroext i1 @fcmp_ult(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ult:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setb %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ult:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setb %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -312,7 +312,7 @@ define zeroext i1 @fcmp_ult(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ult:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setb %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -324,13 +324,13 @@ define zeroext i1 @fcmp_ult(float %x, fl
 
 define zeroext i1 @fcmp_ule(float %x, float %y) {
 ; SDAG-LABEL: fcmp_ule:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setbe %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ule:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setbe %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -338,7 +338,7 @@ define zeroext i1 @fcmp_ule(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ule:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setbe %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -350,7 +350,7 @@ define zeroext i1 @fcmp_ule(float %x, fl
 
 define zeroext i1 @fcmp_une(float %x, float %y) {
 ; SDAG-LABEL: fcmp_une:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpneqss %xmm1, %xmm0
 ; SDAG-NEXT:    movd %xmm0, %eax
 ; SDAG-NEXT:    andl $1, %eax
@@ -358,7 +358,7 @@ define zeroext i1 @fcmp_une(float %x, fl
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_une:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setne %al
 ; FAST_NOAVX-NEXT:    setp %cl
@@ -368,7 +368,7 @@ define zeroext i1 @fcmp_une(float %x, fl
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_une:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setne %al
 ; FAST_AVX-NEXT:    setp %cl
@@ -382,13 +382,13 @@ define zeroext i1 @fcmp_une(float %x, fl
 
 define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_eq:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    sete %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_eq:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    sete %al
 ; FAST-NEXT:    andb $1, %al
@@ -400,13 +400,13 @@ define zeroext i1 @icmp_eq(i32 %x, i32 %
 
 define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_ne:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setne %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ne:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setne %al
 ; FAST-NEXT:    andb $1, %al
@@ -418,13 +418,13 @@ define zeroext i1 @icmp_ne(i32 %x, i32 %
 
 define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_ugt:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    seta %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ugt:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    seta %al
 ; FAST-NEXT:    andb $1, %al
@@ -436,13 +436,13 @@ define zeroext i1 @icmp_ugt(i32 %x, i32
 
 define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_uge:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setae %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_uge:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setae %al
 ; FAST-NEXT:    andb $1, %al
@@ -454,13 +454,13 @@ define zeroext i1 @icmp_uge(i32 %x, i32
 
 define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_ult:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setb %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ult:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    andb $1, %al
@@ -472,13 +472,13 @@ define zeroext i1 @icmp_ult(i32 %x, i32
 
 define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_ule:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setbe %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ule:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setbe %al
 ; FAST-NEXT:    andb $1, %al
@@ -490,13 +490,13 @@ define zeroext i1 @icmp_ule(i32 %x, i32
 
 define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_sgt:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setg %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_sgt:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setg %al
 ; FAST-NEXT:    andb $1, %al
@@ -508,13 +508,13 @@ define zeroext i1 @icmp_sgt(i32 %x, i32
 
 define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_sge:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setge %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_sge:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setge %al
 ; FAST-NEXT:    andb $1, %al
@@ -526,13 +526,13 @@ define zeroext i1 @icmp_sge(i32 %x, i32
 
 define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_slt:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setl %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_slt:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setl %al
 ; FAST-NEXT:    andb $1, %al
@@ -544,13 +544,13 @@ define zeroext i1 @icmp_slt(i32 %x, i32
 
 define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
 ; SDAG-LABEL: icmp_sle:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    cmpl %esi, %edi
 ; SDAG-NEXT:    setle %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_sle:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setle %al
 ; FAST-NEXT:    andb $1, %al
@@ -563,13 +563,13 @@ define zeroext i1 @icmp_sle(i32 %x, i32
 ; Test cmp folding and condition optimization.
 define zeroext i1 @fcmp_oeq2(float %x) {
 ; SDAG-LABEL: fcmp_oeq2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setnp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oeq2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -577,7 +577,7 @@ define zeroext i1 @fcmp_oeq2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oeq2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -589,7 +589,7 @@ define zeroext i1 @fcmp_oeq2(float %x) {
 
 define zeroext i1 @fcmp_oeq3(float %x) {
 ; SDAG-LABEL: fcmp_oeq3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    cmpeqss %xmm0, %xmm1
 ; SDAG-NEXT:    movd %xmm1, %eax
@@ -598,7 +598,7 @@ define zeroext i1 @fcmp_oeq3(float %x) {
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oeq3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    sete %al
@@ -609,7 +609,7 @@ define zeroext i1 @fcmp_oeq3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oeq3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    sete %al
@@ -624,12 +624,12 @@ define zeroext i1 @fcmp_oeq3(float %x) {
 
 define zeroext i1 @fcmp_ogt2(float %x) {
 ; SDAG-LABEL: fcmp_ogt2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: fcmp_ogt2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -640,14 +640,14 @@ define zeroext i1 @fcmp_ogt2(float %x) {
 
 define zeroext i1 @fcmp_ogt3(float %x) {
 ; SDAG-LABEL: fcmp_ogt3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    seta %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ogt3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    seta %al
@@ -656,7 +656,7 @@ define zeroext i1 @fcmp_ogt3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ogt3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    seta %al
@@ -669,13 +669,13 @@ define zeroext i1 @fcmp_ogt3(float %x) {
 
 define zeroext i1 @fcmp_oge2(float %x) {
 ; SDAG-LABEL: fcmp_oge2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setnp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oge2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -683,7 +683,7 @@ define zeroext i1 @fcmp_oge2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oge2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -695,14 +695,14 @@ define zeroext i1 @fcmp_oge2(float %x) {
 
 define zeroext i1 @fcmp_oge3(float %x) {
 ; SDAG-LABEL: fcmp_oge3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setae %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_oge3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setae %al
@@ -711,7 +711,7 @@ define zeroext i1 @fcmp_oge3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oge3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setae %al
@@ -724,12 +724,12 @@ define zeroext i1 @fcmp_oge3(float %x) {
 
 define zeroext i1 @fcmp_olt2(float %x) {
 ; SDAG-LABEL: fcmp_olt2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: fcmp_olt2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -740,14 +740,14 @@ define zeroext i1 @fcmp_olt2(float %x) {
 
 define zeroext i1 @fcmp_olt3(float %x) {
 ; SDAG-LABEL: fcmp_olt3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    seta %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_olt3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    seta %al
@@ -756,7 +756,7 @@ define zeroext i1 @fcmp_olt3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_olt3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    seta %al
@@ -769,13 +769,13 @@ define zeroext i1 @fcmp_olt3(float %x) {
 
 define zeroext i1 @fcmp_ole2(float %x) {
 ; SDAG-LABEL: fcmp_ole2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setnp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ole2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -783,7 +783,7 @@ define zeroext i1 @fcmp_ole2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ole2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -795,14 +795,14 @@ define zeroext i1 @fcmp_ole2(float %x) {
 
 define zeroext i1 @fcmp_ole3(float %x) {
 ; SDAG-LABEL: fcmp_ole3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    setae %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ole3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setae %al
@@ -811,7 +811,7 @@ define zeroext i1 @fcmp_ole3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ole3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setae %al
@@ -824,12 +824,12 @@ define zeroext i1 @fcmp_ole3(float %x) {
 
 define zeroext i1 @fcmp_one2(float %x) {
 ; SDAG-LABEL: fcmp_one2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: fcmp_one2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -840,14 +840,14 @@ define zeroext i1 @fcmp_one2(float %x) {
 
 define zeroext i1 @fcmp_one3(float %x) {
 ; SDAG-LABEL: fcmp_one3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setne %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_one3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setne %al
@@ -856,7 +856,7 @@ define zeroext i1 @fcmp_one3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_one3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setne %al
@@ -869,13 +869,13 @@ define zeroext i1 @fcmp_one3(float %x) {
 
 define zeroext i1 @fcmp_ord2(float %x) {
 ; SDAG-LABEL: fcmp_ord2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setnp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ord2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -883,7 +883,7 @@ define zeroext i1 @fcmp_ord2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ord2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -895,13 +895,13 @@ define zeroext i1 @fcmp_ord2(float %x) {
 
 define zeroext i1 @fcmp_ord3(float %x) {
 ; SDAG-LABEL: fcmp_ord3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setnp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ord3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -909,7 +909,7 @@ define zeroext i1 @fcmp_ord3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ord3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -921,13 +921,13 @@ define zeroext i1 @fcmp_ord3(float %x) {
 
 define zeroext i1 @fcmp_uno2(float %x) {
 ; SDAG-LABEL: fcmp_uno2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_uno2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -935,7 +935,7 @@ define zeroext i1 @fcmp_uno2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uno2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -947,13 +947,13 @@ define zeroext i1 @fcmp_uno2(float %x) {
 
 define zeroext i1 @fcmp_uno3(float %x) {
 ; SDAG-LABEL: fcmp_uno3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_uno3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -961,7 +961,7 @@ define zeroext i1 @fcmp_uno3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uno3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -973,12 +973,12 @@ define zeroext i1 @fcmp_uno3(float %x) {
 
 define zeroext i1 @fcmp_ueq2(float %x) {
 ; SDAG-LABEL: fcmp_ueq2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: fcmp_ueq2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -989,14 +989,14 @@ define zeroext i1 @fcmp_ueq2(float %x) {
 
 define zeroext i1 @fcmp_ueq3(float %x) {
 ; SDAG-LABEL: fcmp_ueq3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    sete %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ueq3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    sete %al
@@ -1005,7 +1005,7 @@ define zeroext i1 @fcmp_ueq3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ueq3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    sete %al
@@ -1018,13 +1018,13 @@ define zeroext i1 @fcmp_ueq3(float %x) {
 
 define zeroext i1 @fcmp_ugt2(float %x) {
 ; SDAG-LABEL: fcmp_ugt2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ugt2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -1032,7 +1032,7 @@ define zeroext i1 @fcmp_ugt2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ugt2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -1044,14 +1044,14 @@ define zeroext i1 @fcmp_ugt2(float %x) {
 
 define zeroext i1 @fcmp_ugt3(float %x) {
 ; SDAG-LABEL: fcmp_ugt3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    setb %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ugt3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setb %al
@@ -1060,7 +1060,7 @@ define zeroext i1 @fcmp_ugt3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ugt3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setb %al
@@ -1073,12 +1073,12 @@ define zeroext i1 @fcmp_ugt3(float %x) {
 
 define zeroext i1 @fcmp_uge2(float %x) {
 ; SDAG-LABEL: fcmp_uge2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: fcmp_uge2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1089,14 +1089,14 @@ define zeroext i1 @fcmp_uge2(float %x) {
 
 define zeroext i1 @fcmp_uge3(float %x) {
 ; SDAG-LABEL: fcmp_uge3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm1
 ; SDAG-NEXT:    setbe %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_uge3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setbe %al
@@ -1105,7 +1105,7 @@ define zeroext i1 @fcmp_uge3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uge3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setbe %al
@@ -1118,13 +1118,13 @@ define zeroext i1 @fcmp_uge3(float %x) {
 
 define zeroext i1 @fcmp_ult2(float %x) {
 ; SDAG-LABEL: fcmp_ult2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ult2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -1132,7 +1132,7 @@ define zeroext i1 @fcmp_ult2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ult2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -1144,14 +1144,14 @@ define zeroext i1 @fcmp_ult2(float %x) {
 
 define zeroext i1 @fcmp_ult3(float %x) {
 ; SDAG-LABEL: fcmp_ult3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setb %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ult3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setb %al
@@ -1160,7 +1160,7 @@ define zeroext i1 @fcmp_ult3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ult3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setb %al
@@ -1173,12 +1173,12 @@ define zeroext i1 @fcmp_ult3(float %x) {
 
 define zeroext i1 @fcmp_ule2(float %x) {
 ; SDAG-LABEL: fcmp_ule2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: fcmp_ule2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1189,14 +1189,14 @@ define zeroext i1 @fcmp_ule2(float %x) {
 
 define zeroext i1 @fcmp_ule3(float %x) {
 ; SDAG-LABEL: fcmp_ule3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    ucomiss %xmm1, %xmm0
 ; SDAG-NEXT:    setbe %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_ule3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setbe %al
@@ -1205,7 +1205,7 @@ define zeroext i1 @fcmp_ule3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ule3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setbe %al
@@ -1218,13 +1218,13 @@ define zeroext i1 @fcmp_ule3(float %x) {
 
 define zeroext i1 @fcmp_une2(float %x) {
 ; SDAG-LABEL: fcmp_une2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomiss %xmm0, %xmm0
 ; SDAG-NEXT:    setp %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_une2:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
@@ -1232,7 +1232,7 @@ define zeroext i1 @fcmp_une2(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_une2:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
@@ -1244,7 +1244,7 @@ define zeroext i1 @fcmp_une2(float %x) {
 
 define zeroext i1 @fcmp_une3(float %x) {
 ; SDAG-LABEL: fcmp_une3:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorps %xmm1, %xmm1
 ; SDAG-NEXT:    cmpneqss %xmm0, %xmm1
 ; SDAG-NEXT:    movd %xmm1, %eax
@@ -1253,7 +1253,7 @@ define zeroext i1 @fcmp_une3(float %x) {
 ; SDAG-NEXT:    retq
 ;
 ; FAST_NOAVX-LABEL: fcmp_une3:
-; FAST_NOAVX:       ## BB#0:
+; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setne %al
@@ -1264,7 +1264,7 @@ define zeroext i1 @fcmp_une3(float %x) {
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_une3:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setne %al
@@ -1279,12 +1279,12 @@ define zeroext i1 @fcmp_une3(float %x) {
 
 define zeroext i1 @icmp_eq2(i32 %x) {
 ; SDAG-LABEL: icmp_eq2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_eq2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1295,12 +1295,12 @@ define zeroext i1 @icmp_eq2(i32 %x) {
 
 define zeroext i1 @icmp_ne2(i32 %x) {
 ; SDAG-LABEL: icmp_ne2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ne2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1311,12 +1311,12 @@ define zeroext i1 @icmp_ne2(i32 %x) {
 
 define zeroext i1 @icmp_ugt2(i32 %x) {
 ; SDAG-LABEL: icmp_ugt2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ugt2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1327,12 +1327,12 @@ define zeroext i1 @icmp_ugt2(i32 %x) {
 
 define zeroext i1 @icmp_uge2(i32 %x) {
 ; SDAG-LABEL: icmp_uge2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_uge2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1343,12 +1343,12 @@ define zeroext i1 @icmp_uge2(i32 %x) {
 
 define zeroext i1 @icmp_ult2(i32 %x) {
 ; SDAG-LABEL: icmp_ult2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ult2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1359,12 +1359,12 @@ define zeroext i1 @icmp_ult2(i32 %x) {
 
 define zeroext i1 @icmp_ule2(i32 %x) {
 ; SDAG-LABEL: icmp_ule2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_ule2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1375,12 +1375,12 @@ define zeroext i1 @icmp_ule2(i32 %x) {
 
 define zeroext i1 @icmp_sgt2(i32 %x) {
 ; SDAG-LABEL: icmp_sgt2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_sgt2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1391,12 +1391,12 @@ define zeroext i1 @icmp_sgt2(i32 %x) {
 
 define zeroext i1 @icmp_sge2(i32 %x) {
 ; SDAG-LABEL: icmp_sge2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_sge2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1407,12 +1407,12 @@ define zeroext i1 @icmp_sge2(i32 %x) {
 
 define zeroext i1 @icmp_slt2(i32 %x) {
 ; SDAG-LABEL: icmp_slt2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    xorl %eax, %eax
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_slt2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax
@@ -1423,12 +1423,12 @@ define zeroext i1 @icmp_slt2(i32 %x) {
 
 define zeroext i1 @icmp_sle2(i32 %x) {
 ; SDAG-LABEL: icmp_sle2:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    movb $1, %al
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: icmp_sle2:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    movzbl %al, %eax

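A note on the SDAG/FAST pairs above, for readers skimming the new check lines: SelectionDAG knows that setcc already produces 0 or 1, so a `zeroext i1` return needs no extra work, while fast-isel re-materializes the boolean explicitly; that is where the recurring `andb $1, %al` / `movzbl %al, %eax` tail in every FAST body comes from. A minimal sketch of a test exercising this, with hypothetical RUN lines (the real test's options may differ):

  ; RUN: llc -mtriple=x86_64-apple-darwin10 < %s | FileCheck %s --check-prefix=SDAG
  ; RUN: llc -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST

  define zeroext i1 @fcmp_ogt(float %x) {
  ; SDAG:       ## %bb.0:
  ; SDAG:         seta %al
  ; SDAG-NEXT:    retq
  ;
  ; FAST:       ## %bb.0:
  ; FAST:         seta %al
  ; FAST-NEXT:    andb $1, %al
  ; FAST-NEXT:    movzbl %al, %eax
    %c = fcmp ogt float %x, 0.0
    ret i1 %c
  }
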
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-constpool.ll Mon Dec  4 09:18:51 2017
@@ -9,25 +9,25 @@
 ; Make sure fast isel uses rip-relative addressing for the small code model.
 define float @constpool_float(float %x) {
 ; CHECK-LABEL: constpool_float:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    addss %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
 ; LARGE-LABEL: constpool_float:
-; LARGE:       ## BB#0:
+; LARGE:       ## %bb.0:
 ; LARGE-NEXT:    movabsq $LCPI0_0, %rax
 ; LARGE-NEXT:    addss (%rax), %xmm0
 ; LARGE-NEXT:    retq
 ;
 ; AVX-LABEL: constpool_float:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; LARGE_AVX-LABEL: constpool_float:
-; LARGE_AVX:       ## BB#0:
+; LARGE_AVX:       ## %bb.0:
 ; LARGE_AVX-NEXT:    movabsq $LCPI0_0, %rax
 ; LARGE_AVX-NEXT:    vaddss (%rax), %xmm0, %xmm0
 ; LARGE_AVX-NEXT:    retq
@@ -38,25 +38,25 @@ define float @constpool_float(float %x)
 
 define double @constpool_double(double %x) nounwind {
 ; CHECK-LABEL: constpool_double:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT:    addsd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
 ; LARGE-LABEL: constpool_double:
-; LARGE:       ## BB#0:
+; LARGE:       ## %bb.0:
 ; LARGE-NEXT:    movabsq $LCPI1_0, %rax
 ; LARGE-NEXT:    addsd (%rax), %xmm0
 ; LARGE-NEXT:    retq
 ;
 ; AVX-LABEL: constpool_double:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; LARGE_AVX-LABEL: constpool_double:
-; LARGE_AVX:       ## BB#0:
+; LARGE_AVX:       ## %bb.0:
 ; LARGE_AVX-NEXT:    movabsq $LCPI1_0, %rax
 ; LARGE_AVX-NEXT:    vaddsd (%rax), %xmm0, %xmm0
 ; LARGE_AVX-NEXT:    retq

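For context on the constpool_float/constpool_double pairs above: under the small code model the constant pool is addressable relative to %rip, so a single movss/movsd load suffices, while the large code model may place code and data anywhere in the 64-bit address space and must first materialize the pool's absolute address with movabsq. A minimal sketch, with hypothetical RUN lines (the real test's options may differ):

  ; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=small < %s | FileCheck %s
  ; RUN: llc -mtriple=x86_64-apple-darwin -fast-isel -code-model=large < %s | FileCheck %s --check-prefix=LARGE

  define float @constpool_add(float %x) {
  ; Small model: one RIP-relative load from the pool.
  ; CHECK:       ## %bb.0:
  ; CHECK:         movss {{.*}}(%rip), %xmm1
  ; CHECK-NEXT:    addss %xmm1, %xmm0
  ;
  ; Large model: the absolute pool address is materialized first.
  ; LARGE:       ## %bb.0:
  ; LARGE:         movabsq $LCPI0_0, %rax
  ; LARGE-NEXT:    addss (%rax), %xmm0
    %r = fadd float %x, 1.625
    ret float %r
  }
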
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll Mon Dec  4 09:18:51 2017
@@ -24,12 +24,12 @@
 
 define double @single_to_double_rr(float %x) {
 ; SSE-LABEL: single_to_double_rr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: single_to_double_rr:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -39,12 +39,12 @@ entry:
 
 define float @double_to_single_rr(double %x) {
 ; SSE-LABEL: double_to_single_rr:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: double_to_single_rr:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -54,13 +54,13 @@ entry:
 
 define double @single_to_double_rm(float* %x) {
 ; SSE-LABEL: single_to_double_rm:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: single_to_double_rm:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -72,12 +72,12 @@ entry:
 
 define double @single_to_double_rm_optsize(float* %x) optsize {
 ; SSE-LABEL: single_to_double_rm_optsize:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    cvtss2sd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: single_to_double_rm_optsize:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -89,13 +89,13 @@ entry:
 
 define float @double_to_single_rm(double* %x) {
 ; SSE-LABEL: double_to_single_rm:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: double_to_single_rm:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -107,12 +107,12 @@ entry:
 
 define float @double_to_single_rm_optsize(double* %x) optsize {
 ; SSE-LABEL: double_to_single_rm_optsize:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    cvtsd2ss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: double_to_single_rm_optsize:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq

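An aside on the rm vs. rm_optsize pairs in this file: the SSE scalar conversions only write the low element of their destination, so the memory forms (cvtss2sd (%rdi), %xmm0) leave the upper lanes live and create a false dependency on whatever was in the register before. By default the backend therefore loads with movss/movsd first, which zeroes the upper lanes and breaks the dependency, and it folds the load only when the function is marked optsize and the byte savings win. A minimal sketch, assuming a hypothetical RUN line:

  ; RUN: llc -mtriple=x86_64-unknown-unknown -fast-isel < %s | FileCheck %s

  define double @ext(float* %p) {
  ; Default: a separate zeroing load breaks the false dependency.
  ; CHECK: movss {{.*}}, %xmm0
  ; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
    %f = load float, float* %p
    %d = fpext float %f to double
    ret double %d
  }

  define double @ext_optsize(float* %p) optsize {
  ; optsize: the shorter memory form is acceptable.
  ; CHECK: cvtss2sd (%rdi), %xmm0
    %f = load float, float* %p
    %d = fpext float %f to double
    ret double %d
  }
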
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 
 define double @long_to_double_rr(i64 %a) {
 ; SSE2-LABEL: long_to_double_rr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2sdq %rdi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_double_rr:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -20,13 +20,13 @@ entry:
 
 define double @long_to_double_rm(i64* %a) {
 ; SSE2-LABEL: long_to_double_rm:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq (%rdi), %rax
 ; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_double_rm:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -37,12 +37,12 @@ entry:
 
 define double @long_to_double_rm_optsize(i64* %a) optsize {
 ; SSE2-LABEL: long_to_double_rm_optsize:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2sdq (%rdi), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_double_rm_optsize:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -53,12 +53,12 @@ entry:
 
 define float @long_to_float_rr(i64 %a) {
 ; SSE2-LABEL: long_to_float_rr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2ssq %rdi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_float_rr:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -68,13 +68,13 @@ entry:
 
 define float @long_to_float_rm(i64* %a) {
 ; SSE2-LABEL: long_to_float_rm:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq (%rdi), %rax
 ; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_float_rm:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -85,12 +85,12 @@ entry:
 
 define float @long_to_float_rm_optsize(i64* %a) optsize {
 ; SSE2-LABEL: long_to_float_rm_optsize:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2ssq (%rdi), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_float_rm_optsize:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll Mon Dec  4 09:18:51 2017
@@ -7,17 +7,17 @@
 
 define double @int_to_double_rr(i32 %a) {
 ; SSE2-LABEL: int_to_double_rr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2sdl %edi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_double_rr:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_double_rr:
-; SSE2_X86:       # BB#0: # %entry
+; SSE2_X86:       # %bb.0: # %entry
 ; SSE2_X86-NEXT:    pushl %ebp
 ; SSE2_X86-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2_X86-NEXT:    .cfi_offset %ebp, -8
@@ -34,7 +34,7 @@ define double @int_to_double_rr(i32 %a)
 ; SSE2_X86-NEXT:    retl
 ;
 ; AVX_X86-LABEL: int_to_double_rr:
-; AVX_X86:       # BB#0: # %entry
+; AVX_X86:       # %bb.0: # %entry
 ; AVX_X86-NEXT:    pushl %ebp
 ; AVX_X86-NEXT:    .cfi_def_cfa_offset 8
 ; AVX_X86-NEXT:    .cfi_offset %ebp, -8
@@ -55,18 +55,18 @@ entry:
 
 define double @int_to_double_rm(i32* %a) {
 ; SSE2-LABEL: int_to_double_rm:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl (%rdi), %eax
 ; SSE2-NEXT:    cvtsi2sdl %eax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_double_rm:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2sdl (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_double_rm:
-; SSE2_X86:       # BB#0: # %entry
+; SSE2_X86:       # %bb.0: # %entry
 ; SSE2_X86-NEXT:    pushl %ebp
 ; SSE2_X86-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2_X86-NEXT:    .cfi_offset %ebp, -8
@@ -83,7 +83,7 @@ define double @int_to_double_rm(i32* %a)
 ; SSE2_X86-NEXT:    retl
 ;
 ; AVX_X86-LABEL: int_to_double_rm:
-; AVX_X86:       # BB#0: # %entry
+; AVX_X86:       # %bb.0: # %entry
 ; AVX_X86-NEXT:    pushl %ebp
 ; AVX_X86-NEXT:    .cfi_def_cfa_offset 8
 ; AVX_X86-NEXT:    .cfi_offset %ebp, -8
@@ -106,17 +106,17 @@ entry:
 
 define double @int_to_double_rm_optsize(i32* %a) optsize {
 ; SSE2-LABEL: int_to_double_rm_optsize:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2sdl (%rdi), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_double_rm_optsize:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2sdl (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_double_rm_optsize:
-; SSE2_X86:       # BB#0: # %entry
+; SSE2_X86:       # %bb.0: # %entry
 ; SSE2_X86-NEXT:    pushl %ebp
 ; SSE2_X86-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2_X86-NEXT:    .cfi_offset %ebp, -8
@@ -133,7 +133,7 @@ define double @int_to_double_rm_optsize(
 ; SSE2_X86-NEXT:    retl
 ;
 ; AVX_X86-LABEL: int_to_double_rm_optsize:
-; AVX_X86:       # BB#0: # %entry
+; AVX_X86:       # %bb.0: # %entry
 ; AVX_X86-NEXT:    pushl %ebp
 ; AVX_X86-NEXT:    .cfi_def_cfa_offset 8
 ; AVX_X86-NEXT:    .cfi_offset %ebp, -8
@@ -156,17 +156,17 @@ entry:
 
 define float @int_to_float_rr(i32 %a) {
 ; SSE2-LABEL: int_to_float_rr:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2ssl %edi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_float_rr:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_float_rr:
-; SSE2_X86:       # BB#0: # %entry
+; SSE2_X86:       # %bb.0: # %entry
 ; SSE2_X86-NEXT:    pushl %eax
 ; SSE2_X86-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2_X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -177,7 +177,7 @@ define float @int_to_float_rr(i32 %a) {
 ; SSE2_X86-NEXT:    retl
 ;
 ; AVX_X86-LABEL: int_to_float_rr:
-; AVX_X86:       # BB#0: # %entry
+; AVX_X86:       # %bb.0: # %entry
 ; AVX_X86-NEXT:    pushl %eax
 ; AVX_X86-NEXT:    .cfi_def_cfa_offset 8
 ; AVX_X86-NEXT:    vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -192,18 +192,18 @@ entry:
 
 define float @int_to_float_rm(i32* %a) {
 ; SSE2-LABEL: int_to_float_rm:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl (%rdi), %eax
 ; SSE2-NEXT:    cvtsi2ssl %eax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_float_rm:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2ssl (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_float_rm:
-; SSE2_X86:       # BB#0: # %entry
+; SSE2_X86:       # %bb.0: # %entry
 ; SSE2_X86-NEXT:    pushl %eax
 ; SSE2_X86-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2_X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -214,7 +214,7 @@ define float @int_to_float_rm(i32* %a) {
 ; SSE2_X86-NEXT:    retl
 ;
 ; AVX_X86-LABEL: int_to_float_rm:
-; AVX_X86:       # BB#0: # %entry
+; AVX_X86:       # %bb.0: # %entry
 ; AVX_X86-NEXT:    pushl %eax
 ; AVX_X86-NEXT:    .cfi_def_cfa_offset 8
 ; AVX_X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -231,17 +231,17 @@ entry:
 
 define float @int_to_float_rm_optsize(i32* %a) optsize {
 ; SSE2-LABEL: int_to_float_rm_optsize:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    cvtsi2ssl (%rdi), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_float_rm_optsize:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vcvtsi2ssl (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_float_rm_optsize:
-; SSE2_X86:       # BB#0: # %entry
+; SSE2_X86:       # %bb.0: # %entry
 ; SSE2_X86-NEXT:    pushl %eax
 ; SSE2_X86-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2_X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -252,7 +252,7 @@ define float @int_to_float_rm_optsize(i3
 ; SSE2_X86-NEXT:    retl
 ;
 ; AVX_X86-LABEL: int_to_float_rm_optsize:
-; AVX_X86:       # BB#0: # %entry
+; AVX_X86:       # %bb.0: # %entry
 ; AVX_X86-NEXT:    pushl %eax
 ; AVX_X86-NEXT:    .cfi_def_cfa_offset 8
 ; AVX_X86-NEXT:    movl {{[0-9]+}}(%esp), %eax

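One more aside, on the _X86 prefixes above: the 32-bit bodies are longer not because the conversion differs but because the i386 calling convention returns floating-point values in x87 st(0), so a value computed in an SSE register has to round-trip through memory before flds/fldl can pick it up; the frame setup (pushl %ebp plus the .cfi_* directives) realigns the stack for the 8-byte spill slot in the double cases. A minimal sketch under those assumptions (hypothetical RUN line):

  ; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse2 -fast-isel < %s | FileCheck %s

  define double @i2d(i32 %a) {
  ; Converted in an SSE register, then returned via the x87 stack.
  ; CHECK: cvtsi2sdl
  ; CHECK: movsd %xmm0, (%esp)
  ; CHECK-NEXT: fldl (%esp)
    %d = sitofp i32 %a to double
    ret double %d
  }
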
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-load-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-load-i1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-load-i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-load-i1.ll Mon Dec  4 09:18:51 2017
@@ -3,10 +3,10 @@
 
 define i1 @test_i1(i1* %b) {
 ; CHECK-LABEL: test_i1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, (%rdi)
 ; CHECK-NEXT:    je .LBB0_2
-; CHECK-NEXT:  # BB#1: # %in
+; CHECK-NEXT:  # %bb.1: # %in
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB0_2: # %out

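This tiny test is a good illustration of the new notation itself: a block that is only ever reached by fallthrough needs no symbol, so it is printed purely as a `# %bb.N:` comment (here `%bb.1`), while a real branch target still gets a `.LBB`-style label (`.LBB0_2` above). A sketch of the same shape with an i1 argument instead of a load, under a hypothetical RUN line:

  ; RUN: llc -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s

  define i1 @choose(i1 %b) {
  ; CHECK:       # %bb.0:
  ; CHECK:         je .LBB0_2
  ; The fallthrough successor is announced only in a comment:
  ; CHECK-NEXT:  # %bb.1:
  ; CHECK:       .LBB0_2:
  entry:
    br i1 %b, label %in, label %out
  in:
    ret i1 false
  out:
    ret i1 true
  }
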
Modified: llvm/trunk/test/CodeGen/X86/fast-isel-nontemporal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-nontemporal.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-nontemporal.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-nontemporal.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@
 
 define void @test_nti32(i32* nocapture %ptr, i32 %X) {
 ; ALL-LABEL: test_nti32:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    movntil %esi, (%rdi)
 ; ALL-NEXT:    retq
 entry:
@@ -24,7 +24,7 @@ entry:
 
 define void @test_nti64(i64* nocapture %ptr, i64 %X) {
 ; ALL-LABEL: test_nti64:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    movntiq %rsi, (%rdi)
 ; ALL-NEXT:    retq
 entry:
@@ -34,27 +34,27 @@ entry:
 
 define void @test_ntfloat(float* nocapture %ptr, float %X) {
 ; SSE2-LABEL: test_ntfloat:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movss %xmm0, (%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_ntfloat:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movntss %xmm0, (%rdi)
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_ntfloat:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movss %xmm0, (%rdi)
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_ntfloat:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovss %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_ntfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovss %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -64,27 +64,27 @@ entry:
 
 define void @test_ntdouble(double* nocapture %ptr, double %X) {
 ; SSE2-LABEL: test_ntdouble:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movsd %xmm0, (%rdi)
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_ntdouble:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movntsd %xmm0, (%rdi)
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_ntdouble:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movsd %xmm0, (%rdi)
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_ntdouble:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovsd %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_ntdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovsd %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -98,7 +98,7 @@ entry:
 
 define void @test_mmx(x86_mmx* nocapture %a0, x86_mmx* nocapture %a1) {
 ; ALL-LABEL: test_mmx:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    movq (%rdi), %mm0
 ; ALL-NEXT:    psrlq $3, %mm0
 ; ALL-NEXT:    movntq %mm0, (%rsi)
@@ -117,17 +117,17 @@ declare x86_mmx @llvm.x86.mmx.psrli.q(x8
 
 define void @test_nt4xfloat(<4 x float>* nocapture %ptr, <4 x float> %X) {
 ; SSE-LABEL: test_nt4xfloat:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntps %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt4xfloat:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntps %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt4xfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntps %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -137,17 +137,17 @@ entry:
 
 define void @test_nt2xdouble(<2 x double>* nocapture %ptr, <2 x double> %X) {
 ; SSE-LABEL: test_nt2xdouble:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntpd %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt2xdouble:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntpd %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt2xdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntpd %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -157,17 +157,17 @@ entry:
 
 define void @test_nt16xi8(<16 x i8>* nocapture %ptr, <16 x i8> %X) {
 ; SSE-LABEL: test_nt16xi8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt16xi8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt16xi8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -177,17 +177,17 @@ entry:
 
 define void @test_nt8xi16(<8 x i16>* nocapture %ptr, <8 x i16> %X) {
 ; SSE-LABEL: test_nt8xi16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt8xi16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt8xi16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -197,17 +197,17 @@ entry:
 
 define void @test_nt4xi32(<4 x i32>* nocapture %ptr, <4 x i32> %X) {
 ; SSE-LABEL: test_nt4xi32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt4xi32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt4xi32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -217,17 +217,17 @@ entry:
 
 define void @test_nt2xi64(<2 x i64>* nocapture %ptr, <2 x i64> %X) {
 ; SSE-LABEL: test_nt2xi64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt2xi64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt2xi64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
 entry:
@@ -241,27 +241,27 @@ entry:
 
 define <4 x float> @test_load_nt4xfloat(<4 x float>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt4xfloat:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt4xfloat:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt4xfloat:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_load_nt4xfloat:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt4xfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -271,27 +271,27 @@ entry:
 
 define <2 x double> @test_load_nt2xdouble(<2 x double>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt2xdouble:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movapd (%rdi), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt2xdouble:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movapd (%rdi), %xmm0
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt2xdouble:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_load_nt2xdouble:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt2xdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -301,17 +301,17 @@ entry:
 
 define <16 x i8> @test_load_nt16xi8(<16 x i8>* nocapture %ptr) {
 ; SSE-LABEL: test_load_nt16xi8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_load_nt16xi8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt16xi8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -321,17 +321,17 @@ entry:
 
 define <8 x i16> @test_load_nt8xi16(<8 x i16>* nocapture %ptr) {
 ; SSE-LABEL: test_load_nt8xi16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_load_nt8xi16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt8xi16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -341,17 +341,17 @@ entry:
 
 define <4 x i32> @test_load_nt4xi32(<4 x i32>* nocapture %ptr) {
 ; SSE-LABEL: test_load_nt4xi32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_load_nt4xi32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt4xi32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -361,17 +361,17 @@ entry:
 
 define <2 x i64> @test_load_nt2xi64(<2 x i64>* nocapture %ptr) {
 ; SSE-LABEL: test_load_nt2xi64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_load_nt2xi64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt2xi64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX512-NEXT:    retq
 entry:
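An aside on the load tests that begin here: there is no non-temporal load instruction before SSE4.1's movntdqa, so the SSE2 and SSE4A runs above simply drop the !nontemporal hint and use ordinary aligned loads (movaps/movapd), while SSE4.1 and later honor it. A minimal sketch of the pattern (hypothetical RUN lines; the real file drives many more configurations):

  ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE2
  ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=SSE41

  define <4 x float> @ntload(<4 x float>* %p) {
  ; SSE2:  movaps (%rdi), %xmm0
  ; SSE41: movntdqa (%rdi), %xmm0
    %v = load <4 x float>, <4 x float>* %p, align 16, !nontemporal !0
    ret <4 x float> %v
  }

  !0 = !{i32 1}
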
@@ -385,19 +385,19 @@ entry:
 
 define void @test_nt8xfloat(<8 x float>* nocapture %ptr, <8 x float> %X) {
 ; SSE-LABEL: test_nt8xfloat:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntps %xmm0, (%rdi)
 ; SSE-NEXT:    movntps %xmm1, 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt8xfloat:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntps %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt8xfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntps %ymm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -408,19 +408,19 @@ entry:
 
 define void @test_nt4xdouble(<4 x double>* nocapture %ptr, <4 x double> %X) {
 ; SSE-LABEL: test_nt4xdouble:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntpd %xmm0, (%rdi)
 ; SSE-NEXT:    movntpd %xmm1, 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt4xdouble:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntpd %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt4xdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntpd %ymm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -431,19 +431,19 @@ entry:
 
 define void @test_nt32xi8(<32 x i8>* nocapture %ptr, <32 x i8> %X) {
 ; SSE-LABEL: test_nt32xi8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt32xi8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt32xi8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -454,19 +454,19 @@ entry:
 
 define void @test_nt16xi16(<16 x i16>* nocapture %ptr, <16 x i16> %X) {
 ; SSE-LABEL: test_nt16xi16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt16xi16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt16xi16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -477,19 +477,19 @@ entry:
 
 define void @test_nt8xi32(<8 x i32>* nocapture %ptr, <8 x i32> %X) {
 ; SSE-LABEL: test_nt8xi32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt8xi32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt8xi32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -500,19 +500,19 @@ entry:
 
 define void @test_nt4xi64(<4 x i64>* nocapture %ptr, <4 x i64> %X) {
 ; SSE-LABEL: test_nt4xi64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt4xi64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt4xi64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -527,25 +527,25 @@ entry:
 
 define <8 x float> @test_load_nt8xfloat(<8 x float>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt8xfloat:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt8xfloat:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt8xfloat:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt8xfloat:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -554,12 +554,12 @@ define <8 x float> @test_load_nt8xfloat(
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt8xfloat:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt8xfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -569,25 +569,25 @@ entry:
 
 define <4 x double> @test_load_nt4xdouble(<4 x double>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt4xdouble:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movapd (%rdi), %xmm0
 ; SSE2-NEXT:    movapd 16(%rdi), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt4xdouble:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movapd (%rdi), %xmm0
 ; SSE4A-NEXT:    movapd 16(%rdi), %xmm1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt4xdouble:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt4xdouble:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -596,12 +596,12 @@ define <4 x double> @test_load_nt4xdoubl
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt4xdouble:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt4xdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -611,25 +611,25 @@ entry:
 
 define <32 x i8> @test_load_nt32xi8(<32 x i8>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt32xi8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt32xi8:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt32xi8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt32xi8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -638,12 +638,12 @@ define <32 x i8> @test_load_nt32xi8(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt32xi8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt32xi8:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -653,25 +653,25 @@ entry:
 
 define <16 x i16> @test_load_nt16xi16(<16 x i16>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt16xi16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt16xi16:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt16xi16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt16xi16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -680,12 +680,12 @@ define <16 x i16> @test_load_nt16xi16(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt16xi16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt16xi16:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -695,25 +695,25 @@ entry:
 
 define <8 x i32> @test_load_nt8xi32(<8 x i32>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt8xi32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt8xi32:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt8xi32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt8xi32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -722,12 +722,12 @@ define <8 x i32> @test_load_nt8xi32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt8xi32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt8xi32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -737,25 +737,25 @@ entry:
 
 define <4 x i64> @test_load_nt4xi64(<4 x i64>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt4xi64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt4xi64:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt4xi64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt4xi64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -764,12 +764,12 @@ define <4 x i64> @test_load_nt4xi64(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt4xi64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt4xi64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512-NEXT:    retq
 entry:
@@ -783,7 +783,7 @@ entry:
 
 define void @test_nt16xfloat(<16 x float>* nocapture %ptr, <16 x float> %X) {
 ; SSE-LABEL: test_nt16xfloat:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntps %xmm0, (%rdi)
 ; SSE-NEXT:    movntps %xmm1, 16(%rdi)
 ; SSE-NEXT:    movntps %xmm2, 32(%rdi)
@@ -791,14 +791,14 @@ define void @test_nt16xfloat(<16 x float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt16xfloat:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntps %ymm0, (%rdi)
 ; AVX-NEXT:    vmovntps %ymm1, 32(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt16xfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntps %zmm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -809,7 +809,7 @@ entry:
 
 define void @test_nt8xdouble(<8 x double>* nocapture %ptr, <8 x double> %X) {
 ; SSE-LABEL: test_nt8xdouble:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntpd %xmm0, (%rdi)
 ; SSE-NEXT:    movntpd %xmm1, 16(%rdi)
 ; SSE-NEXT:    movntpd %xmm2, 32(%rdi)
@@ -817,14 +817,14 @@ define void @test_nt8xdouble(<8 x double
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt8xdouble:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntpd %ymm0, (%rdi)
 ; AVX-NEXT:    vmovntpd %ymm1, 32(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt8xdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntpd %zmm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -835,7 +835,7 @@ entry:
 
 define void @test_nt64xi8(<64 x i8>* nocapture %ptr, <64 x i8> %X) {
 ; SSE-LABEL: test_nt64xi8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    movntdq %xmm2, 32(%rdi)
@@ -843,28 +843,28 @@ define void @test_nt64xi8(<64 x i8>* noc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt64xi8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512VL-LABEL: test_nt64xi8:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512VL-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_nt64xi8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512F-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_nt64xi8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vmovntdq %zmm0, (%rdi)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -875,7 +875,7 @@ entry:
 
 define void @test_nt32xi16(<32 x i16>* nocapture %ptr, <32 x i16> %X) {
 ; SSE-LABEL: test_nt32xi16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    movntdq %xmm2, 32(%rdi)
@@ -883,28 +883,28 @@ define void @test_nt32xi16(<32 x i16>* n
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt32xi16:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512VL-LABEL: test_nt32xi16:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512VL-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_nt32xi16:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX512F-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_nt32xi16:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vmovntdq %zmm0, (%rdi)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -915,7 +915,7 @@ entry:
 
 define void @test_nt16xi32(<16 x i32>* nocapture %ptr, <16 x i32> %X) {
 ; SSE-LABEL: test_nt16xi32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    movntdq %xmm2, 32(%rdi)
@@ -923,14 +923,14 @@ define void @test_nt16xi32(<16 x i32>* n
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt16xi32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt16xi32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %zmm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -941,7 +941,7 @@ entry:
 
 define void @test_nt8xi64(<8 x i64>* nocapture %ptr, <8 x i64> %X) {
 ; SSE-LABEL: test_nt8xi64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movntdq %xmm0, (%rdi)
 ; SSE-NEXT:    movntdq %xmm1, 16(%rdi)
 ; SSE-NEXT:    movntdq %xmm2, 32(%rdi)
@@ -949,14 +949,14 @@ define void @test_nt8xi64(<8 x i64>* noc
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt8xi64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovntdq %ymm0, (%rdi)
 ; AVX-NEXT:    vmovntdq %ymm1, 32(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_nt8xi64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdq %zmm0, (%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -971,7 +971,7 @@ entry:
 
 define <16 x float> @test_load_nt16xfloat(<16 x float>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt16xfloat:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    movaps 32(%rdi), %xmm2
@@ -979,7 +979,7 @@ define <16 x float> @test_load_nt16xfloa
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt16xfloat:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    movaps 32(%rdi), %xmm2
@@ -987,7 +987,7 @@ define <16 x float> @test_load_nt16xfloa
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt16xfloat:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    movntdqa 32(%rdi), %xmm2
@@ -995,7 +995,7 @@ define <16 x float> @test_load_nt16xfloa
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt16xfloat:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -1009,13 +1009,13 @@ define <16 x float> @test_load_nt16xfloa
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt16xfloat:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt16xfloat:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -1025,7 +1025,7 @@ entry:
 
 define <8 x double> @test_load_nt8xdouble(<8 x double>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt8xdouble:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movapd (%rdi), %xmm0
 ; SSE2-NEXT:    movapd 16(%rdi), %xmm1
 ; SSE2-NEXT:    movapd 32(%rdi), %xmm2
@@ -1033,7 +1033,7 @@ define <8 x double> @test_load_nt8xdoubl
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt8xdouble:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movapd (%rdi), %xmm0
 ; SSE4A-NEXT:    movapd 16(%rdi), %xmm1
 ; SSE4A-NEXT:    movapd 32(%rdi), %xmm2
@@ -1041,7 +1041,7 @@ define <8 x double> @test_load_nt8xdoubl
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt8xdouble:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    movntdqa 32(%rdi), %xmm2
@@ -1049,7 +1049,7 @@ define <8 x double> @test_load_nt8xdoubl
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt8xdouble:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -1063,13 +1063,13 @@ define <8 x double> @test_load_nt8xdoubl
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt8xdouble:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt8xdouble:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -1079,7 +1079,7 @@ entry:
 
 define <64 x i8> @test_load_nt64xi8(<64 x i8>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt64xi8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    movaps 32(%rdi), %xmm2
@@ -1087,7 +1087,7 @@ define <64 x i8> @test_load_nt64xi8(<64
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt64xi8:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    movaps 32(%rdi), %xmm2
@@ -1095,7 +1095,7 @@ define <64 x i8> @test_load_nt64xi8(<64
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt64xi8:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    movntdqa 32(%rdi), %xmm2
@@ -1103,7 +1103,7 @@ define <64 x i8> @test_load_nt64xi8(<64
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt64xi8:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -1117,25 +1117,25 @@ define <64 x i8> @test_load_nt64xi8(<64
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt64xi8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: test_load_nt64xi8:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_load_nt64xi8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_load_nt64xi8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vmovntdqa (%rdi), %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -1145,7 +1145,7 @@ entry:
 
 define <32 x i16> @test_load_nt32xi16(<32 x i16>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt32xi16:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    movaps 32(%rdi), %xmm2
@@ -1153,7 +1153,7 @@ define <32 x i16> @test_load_nt32xi16(<3
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt32xi16:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    movaps 32(%rdi), %xmm2
@@ -1161,7 +1161,7 @@ define <32 x i16> @test_load_nt32xi16(<3
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt32xi16:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    movntdqa 32(%rdi), %xmm2
@@ -1169,7 +1169,7 @@ define <32 x i16> @test_load_nt32xi16(<3
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt32xi16:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -1183,25 +1183,25 @@ define <32 x i16> @test_load_nt32xi16(<3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt32xi16:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: test_load_nt32xi16:
-; AVX512VL:       # BB#0: # %entry
+; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512F-LABEL: test_load_nt32xi16:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_load_nt32xi16:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vmovntdqa (%rdi), %zmm0
 ; AVX512BW-NEXT:    retq
 entry:
@@ -1211,7 +1211,7 @@ entry:
 
 define <16 x i32> @test_load_nt16xi32(<16 x i32>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt16xi32:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    movaps 32(%rdi), %xmm2
@@ -1219,7 +1219,7 @@ define <16 x i32> @test_load_nt16xi32(<1
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt16xi32:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    movaps 32(%rdi), %xmm2
@@ -1227,7 +1227,7 @@ define <16 x i32> @test_load_nt16xi32(<1
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt16xi32:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    movntdqa 32(%rdi), %xmm2
@@ -1235,7 +1235,7 @@ define <16 x i32> @test_load_nt16xi32(<1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt16xi32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -1249,13 +1249,13 @@ define <16 x i32> @test_load_nt16xi32(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt16xi32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt16xi32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -1265,7 +1265,7 @@ entry:
 
 define <8 x i64> @test_load_nt8xi64(<8 x i64>* nocapture %ptr) {
 ; SSE2-LABEL: test_load_nt8xi64:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movaps (%rdi), %xmm0
 ; SSE2-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE2-NEXT:    movaps 32(%rdi), %xmm2
@@ -1273,7 +1273,7 @@ define <8 x i64> @test_load_nt8xi64(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE4A-LABEL: test_load_nt8xi64:
-; SSE4A:       # BB#0: # %entry
+; SSE4A:       # %bb.0: # %entry
 ; SSE4A-NEXT:    movaps (%rdi), %xmm0
 ; SSE4A-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE4A-NEXT:    movaps 32(%rdi), %xmm2
@@ -1281,7 +1281,7 @@ define <8 x i64> @test_load_nt8xi64(<8 x
 ; SSE4A-NEXT:    retq
 ;
 ; SSE41-LABEL: test_load_nt8xi64:
-; SSE41:       # BB#0: # %entry
+; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    movntdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movntdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    movntdqa 32(%rdi), %xmm2
@@ -1289,7 +1289,7 @@ define <8 x i64> @test_load_nt8xi64(<8 x
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_load_nt8xi64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovntdqa (%rdi), %xmm0
 ; AVX1-NEXT:    # implicit-def: %ymm1
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm1
@@ -1303,13 +1303,13 @@ define <8 x i64> @test_load_nt8xi64(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_load_nt8xi64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovntdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovntdqa 32(%rdi), %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_load_nt8xi64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovntdqa (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: select_cmov_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    cmovew %dx, %si
 ; CHECK-NEXT:    movzwl %si, %eax
@@ -18,7 +18,7 @@ define zeroext i16 @select_cmov_i16(i1 z
 
 define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: select_cmp_cmov_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpw %si, %di
 ; CHECK-NEXT:    cmovbw %di, %si
 ; CHECK-NEXT:    movzwl %si, %eax
@@ -30,7 +30,7 @@ define zeroext i16 @select_cmp_cmov_i16(
 
 define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
 ; CHECK-LABEL: select_cmov_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    cmovel %edx, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -41,7 +41,7 @@ define i32 @select_cmov_i32(i1 zeroext %
 
 define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
 ; CHECK-LABEL: select_cmp_cmov_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    cmovbl %edi, %esi
 ; CHECK-NEXT:    movl %esi, %eax
@@ -53,7 +53,7 @@ define i32 @select_cmp_cmov_i32(i32 %a,
 
 define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
 ; CHECK-LABEL: select_cmov_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    cmoveq %rdx, %rsi
 ; CHECK-NEXT:    movq %rsi, %rax
@@ -64,7 +64,7 @@ define i64 @select_cmov_i64(i1 zeroext %
 
 define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
 ; CHECK-LABEL: select_cmp_cmov_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovbq %rdi, %rsi
 ; CHECK-NEXT:    movq %rsi, %rax

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-select-cmov2.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_fcmp_false_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rax
 ; CHECK-NEXT:    retq
   %1 = fcmp false double %a, %b
@@ -18,7 +18,7 @@ define i64 @select_fcmp_false_cmov(doubl
 
 define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; SDAG-LABEL: select_fcmp_oeq_cmov:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomisd %xmm1, %xmm0
 ; SDAG-NEXT:    cmovneq %rsi, %rdi
 ; SDAG-NEXT:    cmovpq %rsi, %rdi
@@ -26,7 +26,7 @@ define i64 @select_fcmp_oeq_cmov(double
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: select_fcmp_oeq_cmov:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    ucomisd %xmm1, %xmm0
 ; FAST-NEXT:    setnp %al
 ; FAST-NEXT:    sete %cl
@@ -36,7 +36,7 @@ define i64 @select_fcmp_oeq_cmov(double
 ; FAST-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_oeq_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    sete %cl
@@ -51,14 +51,14 @@ define i64 @select_fcmp_oeq_cmov(double
 
 define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ogt_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovbeq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ogt_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovbeq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -70,14 +70,14 @@ define i64 @select_fcmp_ogt_cmov(double
 
 define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_oge_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovbq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_oge_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovbq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -89,14 +89,14 @@ define i64 @select_fcmp_oge_cmov(double
 
 define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_olt_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm0, %xmm1
 ; NOAVX-NEXT:    cmovbeq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_olt_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm0, %xmm1
 ; FAST_AVX-NEXT:    cmovbeq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -108,14 +108,14 @@ define i64 @select_fcmp_olt_cmov(double
 
 define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ole_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm0, %xmm1
 ; NOAVX-NEXT:    cmovbq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ole_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm0, %xmm1
 ; FAST_AVX-NEXT:    cmovbq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -127,14 +127,14 @@ define i64 @select_fcmp_ole_cmov(double
 
 define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_one_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmoveq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_one_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmoveq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -146,14 +146,14 @@ define i64 @select_fcmp_one_cmov(double
 
 define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ord_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovpq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ord_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovpq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -165,14 +165,14 @@ define i64 @select_fcmp_ord_cmov(double
 
 define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_uno_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovnpq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_uno_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovnpq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -184,14 +184,14 @@ define i64 @select_fcmp_uno_cmov(double
 
 define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ueq_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovneq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ueq_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovneq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -203,14 +203,14 @@ define i64 @select_fcmp_ueq_cmov(double
 
 define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ugt_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm0, %xmm1
 ; NOAVX-NEXT:    cmovaeq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ugt_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm0, %xmm1
 ; FAST_AVX-NEXT:    cmovaeq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -222,14 +222,14 @@ define i64 @select_fcmp_ugt_cmov(double
 
 define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_uge_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm0, %xmm1
 ; NOAVX-NEXT:    cmovaq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_uge_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm0, %xmm1
 ; FAST_AVX-NEXT:    cmovaq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -241,14 +241,14 @@ define i64 @select_fcmp_uge_cmov(double
 
 define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ult_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovaeq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ult_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovaeq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -260,14 +260,14 @@ define i64 @select_fcmp_ult_cmov(double
 
 define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; NOAVX-LABEL: select_fcmp_ule_cmov:
-; NOAVX:       ## BB#0:
+; NOAVX:       ## %bb.0:
 ; NOAVX-NEXT:    ucomisd %xmm1, %xmm0
 ; NOAVX-NEXT:    cmovaq %rsi, %rdi
 ; NOAVX-NEXT:    movq %rdi, %rax
 ; NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_ule_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    cmovaq %rsi, %rdi
 ; FAST_AVX-NEXT:    movq %rdi, %rax
@@ -279,7 +279,7 @@ define i64 @select_fcmp_ule_cmov(double
 
 define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; SDAG-LABEL: select_fcmp_une_cmov:
-; SDAG:       ## BB#0:
+; SDAG:       ## %bb.0:
 ; SDAG-NEXT:    ucomisd %xmm1, %xmm0
 ; SDAG-NEXT:    cmovneq %rdi, %rsi
 ; SDAG-NEXT:    cmovpq %rdi, %rsi
@@ -287,7 +287,7 @@ define i64 @select_fcmp_une_cmov(double
 ; SDAG-NEXT:    retq
 ;
 ; FAST-LABEL: select_fcmp_une_cmov:
-; FAST:       ## BB#0:
+; FAST:       ## %bb.0:
 ; FAST-NEXT:    ucomisd %xmm1, %xmm0
 ; FAST-NEXT:    setp %al
 ; FAST-NEXT:    setne %cl
@@ -297,7 +297,7 @@ define i64 @select_fcmp_une_cmov(double
 ; FAST-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: select_fcmp_une_cmov:
-; FAST_AVX:       ## BB#0:
+; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomisd %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    setne %cl
@@ -312,7 +312,7 @@ define i64 @select_fcmp_une_cmov(double
 
 define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_fcmp_true_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq
   %1 = fcmp true double %a, %b
@@ -322,7 +322,7 @@ define i64 @select_fcmp_true_cmov(double
 
 define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_eq_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovneq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -334,7 +334,7 @@ define i64 @select_icmp_eq_cmov(i64 %a,
 
 define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_ne_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmoveq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -346,7 +346,7 @@ define i64 @select_icmp_ne_cmov(i64 %a,
 
 define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_ugt_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovbeq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -359,7 +359,7 @@ define i64 @select_icmp_ugt_cmov(i64 %a,
 
 define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_uge_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovbq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -371,7 +371,7 @@ define i64 @select_icmp_uge_cmov(i64 %a,
 
 define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_ult_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovaeq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -383,7 +383,7 @@ define i64 @select_icmp_ult_cmov(i64 %a,
 
 define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_ule_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovaq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -395,7 +395,7 @@ define i64 @select_icmp_ule_cmov(i64 %a,
 
 define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_sgt_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovleq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -407,7 +407,7 @@ define i64 @select_icmp_sgt_cmov(i64 %a,
 
 define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_sge_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovlq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -419,7 +419,7 @@ define i64 @select_icmp_sge_cmov(i64 %a,
 
 define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_slt_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovgeq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -431,7 +431,7 @@ define i64 @select_icmp_slt_cmov(i64 %a,
 
 define i64 @select_icmp_sle_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: select_icmp_sle_cmov:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    cmovgq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll Mon Dec  4 09:18:51 2017
@@ -7,17 +7,17 @@
 
 define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_one_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    ucomiss %xmm1, %xmm0
 ; SSE-NEXT:    jne LBB0_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm3, %xmm2
 ; SSE-NEXT:  LBB0_2:
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_one_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vcmpneq_oqss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
@@ -28,17 +28,17 @@ define float @select_fcmp_one_f32(float
 
 define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_one_f64:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0
 ; SSE-NEXT:    jne LBB1_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm3, %xmm2
 ; SSE-NEXT:  LBB1_2:
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_one_f64:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vcmpneq_oqsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
@@ -49,19 +49,19 @@ define double @select_fcmp_one_f64(doubl
 
 define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_eq_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    je LBB2_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB2_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_eq_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    je LBB2_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB2_2:
 ; AVX-NEXT:    retq
@@ -72,19 +72,19 @@ define float @select_icmp_eq_f32(i64 %a,
 
 define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_ne_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jne LBB3_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB3_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_ne_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jne LBB3_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB3_2:
 ; AVX-NEXT:    retq
@@ -95,19 +95,19 @@ define float @select_icmp_ne_f32(i64 %a,
 
 define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_ugt_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    ja LBB4_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB4_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_ugt_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    ja LBB4_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB4_2:
 ; AVX-NEXT:    retq
@@ -118,19 +118,19 @@ define float @select_icmp_ugt_f32(i64 %a
 
 define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_uge_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jae LBB5_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB5_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_uge_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jae LBB5_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB5_2:
 ; AVX-NEXT:    retq
@@ -141,19 +141,19 @@ define float @select_icmp_uge_f32(i64 %a
 
 define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_ult_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jb LBB6_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB6_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_ult_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jb LBB6_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB6_2:
 ; AVX-NEXT:    retq
@@ -164,19 +164,19 @@ define float @select_icmp_ult_f32(i64 %a
 
 define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_ule_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jbe LBB7_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB7_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_ule_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jbe LBB7_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB7_2:
 ; AVX-NEXT:    retq
@@ -187,19 +187,19 @@ define float @select_icmp_ule_f32(i64 %a
 
 define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_sgt_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jg LBB8_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB8_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_sgt_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jg LBB8_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB8_2:
 ; AVX-NEXT:    retq
@@ -210,19 +210,19 @@ define float @select_icmp_sgt_f32(i64 %a
 
 define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_sge_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jge LBB9_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB9_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_sge_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jge LBB9_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB9_2:
 ; AVX-NEXT:    retq
@@ -233,19 +233,19 @@ define float @select_icmp_sge_f32(i64 %a
 
 define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_slt_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jl LBB10_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB10_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_slt_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jl LBB10_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB10_2:
 ; AVX-NEXT:    retq
@@ -256,19 +256,19 @@ define float @select_icmp_slt_f32(i64 %a
 
 define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
 ; SSE-LABEL: select_icmp_sle_f32:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpq %rsi, %rdi
 ; SSE-NEXT:    jle LBB11_2
-; SSE-NEXT:  ## BB#1:
+; SSE-NEXT:  ## %bb.1:
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:  LBB11_2:
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_icmp_sle_f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    cmpq %rsi, %rdi
 ; AVX-NEXT:    jle LBB11_2
-; AVX-NEXT:  ## BB#1:
+; AVX-NEXT:  ## %bb.1:
 ; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:  LBB11_2:
 ; AVX-NEXT:    retq
@@ -279,10 +279,10 @@ define float @select_icmp_sle_f32(i64 %a
 
 define i8 @select_icmp_sle_i8(i64 %a, i64 %b, i8 %c, i8 %d) {
 ; CHECK-LABEL: select_icmp_sle_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    jle LBB12_2
-; CHECK-NEXT:  ## BB#1:
+; CHECK-NEXT:  ## %bb.1:
 ; CHECK-NEXT:    movl %ecx, %edx
 ; CHECK-NEXT:  LBB12_2:
 ; CHECK-NEXT:    movl %edx, %eax

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_oeq_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqss %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -18,13 +18,13 @@ define float @select_fcmp_oeq_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_oeq_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_oeq_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpeqss %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -36,7 +36,7 @@ define float @select_fcmp_oeq_f32(float
 
 define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_oeq_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpeqsd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -44,13 +44,13 @@ define double @select_fcmp_oeq_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_oeq_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpeqsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_oeq_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpeqsd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -62,7 +62,7 @@ define double @select_fcmp_oeq_f64(doubl
 
 define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_ogt_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpltss %xmm0, %xmm1
 ; SSE-NEXT:    andps %xmm1, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm1
@@ -71,13 +71,13 @@ define float @select_fcmp_ogt_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ogt_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpltss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ogt_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpltss %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -89,7 +89,7 @@ define float @select_fcmp_ogt_f32(float
 
 define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_ogt_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpltsd %xmm0, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm1
@@ -98,13 +98,13 @@ define double @select_fcmp_ogt_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ogt_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpltsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ogt_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpltsd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -116,7 +116,7 @@ define double @select_fcmp_ogt_f64(doubl
 
 define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_oge_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpless %xmm0, %xmm1
 ; SSE-NEXT:    andps %xmm1, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm1
@@ -125,13 +125,13 @@ define float @select_fcmp_oge_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_oge_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_oge_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpless %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -143,7 +143,7 @@ define float @select_fcmp_oge_f32(float
 
 define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_oge_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmplesd %xmm0, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm1
@@ -152,13 +152,13 @@ define double @select_fcmp_oge_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_oge_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_oge_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmplesd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -170,7 +170,7 @@ define double @select_fcmp_oge_f64(doubl
 
 define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_olt_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpltss %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -178,13 +178,13 @@ define float @select_fcmp_olt_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_olt_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpltss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_olt_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpltss %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -196,7 +196,7 @@ define float @select_fcmp_olt_f32(float
 
 define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_olt_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpltsd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -204,13 +204,13 @@ define double @select_fcmp_olt_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_olt_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpltsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_olt_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpltsd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -222,7 +222,7 @@ define double @select_fcmp_olt_f64(doubl
 
 define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_ole_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpless %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -230,13 +230,13 @@ define float @select_fcmp_ole_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ole_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpless %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ole_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpless %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -248,7 +248,7 @@ define float @select_fcmp_ole_f32(float
 
 define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_ole_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmplesd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -256,13 +256,13 @@ define double @select_fcmp_ole_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ole_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmplesd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ole_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmplesd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -274,7 +274,7 @@ define double @select_fcmp_ole_f64(doubl
 
 define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_ord_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpordss %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -282,13 +282,13 @@ define float @select_fcmp_ord_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ord_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpordss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ord_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpordss %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -300,7 +300,7 @@ define float @select_fcmp_ord_f32(float
 
 define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_ord_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpordsd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -308,13 +308,13 @@ define double @select_fcmp_ord_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ord_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ord_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpordsd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -326,7 +326,7 @@ define double @select_fcmp_ord_f64(doubl
 
 define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_uno_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpunordss %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -334,13 +334,13 @@ define float @select_fcmp_uno_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_uno_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpunordss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_uno_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpunordss %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -352,7 +352,7 @@ define float @select_fcmp_uno_f32(float
 
 define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_uno_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpunordsd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -360,13 +360,13 @@ define double @select_fcmp_uno_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_uno_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpunordsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_uno_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpunordsd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -378,7 +378,7 @@ define double @select_fcmp_uno_f64(doubl
 
 define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_ugt_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnless %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -386,13 +386,13 @@ define float @select_fcmp_ugt_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ugt_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnless %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ugt_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnless %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -404,7 +404,7 @@ define float @select_fcmp_ugt_f32(float
 
 define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_ugt_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnlesd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -412,13 +412,13 @@ define double @select_fcmp_ugt_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ugt_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnlesd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ugt_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnlesd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -430,7 +430,7 @@ define double @select_fcmp_ugt_f64(doubl
 
 define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_uge_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnltss %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -438,13 +438,13 @@ define float @select_fcmp_uge_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_uge_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnltss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_uge_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnltss %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -456,7 +456,7 @@ define float @select_fcmp_uge_f32(float
 
 define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_uge_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnltsd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -464,13 +464,13 @@ define double @select_fcmp_uge_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_uge_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnltsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_uge_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnltsd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -482,7 +482,7 @@ define double @select_fcmp_uge_f64(doubl
 
 define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_ult_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnless %xmm0, %xmm1
 ; SSE-NEXT:    andps %xmm1, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm1
@@ -491,13 +491,13 @@ define float @select_fcmp_ult_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ult_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ult_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnless %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -509,7 +509,7 @@ define float @select_fcmp_ult_f32(float
 
 define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_ult_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnlesd %xmm0, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm1
@@ -518,13 +518,13 @@ define double @select_fcmp_ult_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ult_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnlesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ult_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnlesd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -536,7 +536,7 @@ define double @select_fcmp_ult_f64(doubl
 
 define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_ule_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnltss %xmm0, %xmm1
 ; SSE-NEXT:    andps %xmm1, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm1
@@ -545,13 +545,13 @@ define float @select_fcmp_ule_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ule_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnltss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ule_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnltss %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -563,7 +563,7 @@ define float @select_fcmp_ule_f32(float
 
 define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_ule_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpnltsd %xmm0, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm1
@@ -572,13 +572,13 @@ define double @select_fcmp_ule_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ule_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpnltsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_ule_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpnltsd %xmm0, %xmm1, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0
@@ -590,7 +590,7 @@ define double @select_fcmp_ule_f64(doubl
 
 define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
 ; SSE-LABEL: select_fcmp_une_f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqss %xmm1, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm2
 ; SSE-NEXT:    andnps %xmm3, %xmm0
@@ -598,13 +598,13 @@ define float @select_fcmp_une_f32(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_une_f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_une_f32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpneqss %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovss %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovaps %xmm3, %xmm0
@@ -616,7 +616,7 @@ define float @select_fcmp_une_f32(float
 
 define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
 ; SSE-LABEL: select_fcmp_une_f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    cmpneqsd %xmm1, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
 ; SSE-NEXT:    andnpd %xmm3, %xmm0
@@ -624,13 +624,13 @@ define double @select_fcmp_une_f64(doubl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_une_f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpneqsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: select_fcmp_une_f64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vcmpneqsd %xmm1, %xmm0, %k1
 ; AVX512-NEXT:    vmovsd %xmm2, %xmm0, %xmm3 {%k1}
 ; AVX512-NEXT:    vmovapd %xmm3, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-sext-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-sext-zext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-sext-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-sext-zext.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i8 @test1(i8 %x) nounwind {
 ; X32-LABEL: test1:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    negb %al
@@ -12,7 +12,7 @@ define i8 @test1(i8 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test1:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    negb %dil
 ; X64-NEXT:    movl %edi, %eax
@@ -25,7 +25,7 @@ define i8 @test1(i8 %x) nounwind {
 
 define i16 @test2(i16 %x) nounwind {
 ; X32-LABEL: test2:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    negb %al
@@ -35,7 +35,7 @@ define i16 @test2(i16 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test2:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    negb %dil
 ; X64-NEXT:    movsbl %dil, %eax
@@ -49,7 +49,7 @@ define i16 @test2(i16 %x) nounwind {
 
 define i32 @test3(i32 %x) nounwind {
 ; X32-LABEL: test3:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    negb %al
@@ -58,7 +58,7 @@ define i32 @test3(i32 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test3:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    negb %dil
 ; X64-NEXT:    movsbl %dil, %eax
@@ -71,7 +71,7 @@ define i32 @test3(i32 %x) nounwind {
 
 define i32 @test4(i32 %x) nounwind {
 ; X32-LABEL: test4:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    negb %al
@@ -80,7 +80,7 @@ define i32 @test4(i32 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test4:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    negb %dil
 ; X64-NEXT:    movsbl %dil, %eax
@@ -93,14 +93,14 @@ define i32 @test4(i32 %x) nounwind {
 
 define i8 @test5(i8 %x) nounwind {
 ; X32-LABEL: test5:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test5:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -112,7 +112,7 @@ define i8 @test5(i8 %x) nounwind {
 
 define i16 @test6(i16 %x) nounwind {
 ; X32-LABEL: test6:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -121,7 +121,7 @@ define i16 @test6(i16 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test6:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -134,7 +134,7 @@ define i16 @test6(i16 %x) nounwind {
 
 define i32 @test7(i32 %x) nounwind {
 ; X32-LABEL: test7:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -142,7 +142,7 @@ define i32 @test7(i32 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test7:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    retq
@@ -154,7 +154,7 @@ define i32 @test7(i32 %x) nounwind {
 
 define i32 @test8(i32 %x) nounwind {
 ; X32-LABEL: test8:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -162,7 +162,7 @@ define i32 @test8(i32 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test8:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    retq
@@ -174,14 +174,14 @@ define i32 @test8(i32 %x) nounwind {
 
 define i16 @test9(i8 %x) nounwind {
 ; X32-LABEL: test9:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test9:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movsbl %dil, %eax
 ; X64-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X64-NEXT:    retq
@@ -192,13 +192,13 @@ define i16 @test9(i8 %x) nounwind {
 
 define i32 @test10(i8 %x) nounwind {
 ; X32-LABEL: test10:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test10:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movsbl %dil, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -208,7 +208,7 @@ define i32 @test10(i8 %x) nounwind {
 
 define i64 @test11(i8 %x) nounwind {
 ; X32-LABEL: test11:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %eax, %edx
 ; X32-NEXT:    sarl $31, %edx
@@ -216,7 +216,7 @@ define i64 @test11(i8 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test11:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movsbq %dil, %rax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -226,14 +226,14 @@ define i64 @test11(i8 %x) nounwind {
 
 define i16 @test12(i8 %x) nounwind {
 ; X32-LABEL: test12:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test12:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X64-NEXT:    retq
@@ -244,13 +244,13 @@ define i16 @test12(i8 %x) nounwind {
 
 define i32 @test13(i8 %x) nounwind {
 ; X32-LABEL: test13:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test13:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -260,14 +260,14 @@ define i32 @test13(i8 %x) nounwind {
 
 define i64 @test14(i8 %x) nounwind {
 ; X32-LABEL: test14:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test14:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -277,13 +277,13 @@ define i64 @test14(i8 %x) nounwind {
 
 define i32 @test15(i16 %x) nounwind {
 ; X32-LABEL: test15:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test15:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movswl %di, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -293,7 +293,7 @@ define i32 @test15(i16 %x) nounwind {
 
 define i64 @test16(i16 %x) nounwind {
 ; X32-LABEL: test16:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %eax, %edx
 ; X32-NEXT:    sarl $31, %edx
@@ -301,7 +301,7 @@ define i64 @test16(i16 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test16:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movswq %di, %rax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -311,13 +311,13 @@ define i64 @test16(i16 %x) nounwind {
 
 define i32 @test17(i16 %x) nounwind {
 ; X32-LABEL: test17:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test17:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movzwl %di, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -327,14 +327,14 @@ define i32 @test17(i16 %x) nounwind {
 
 define i64 @test18(i16 %x) nounwind {
 ; X32-LABEL: test18:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test18:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movzwl %di, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -344,7 +344,7 @@ define i64 @test18(i16 %x) nounwind {
 
 define i64 @test19(i32 %x) nounwind {
 ; X32-LABEL: test19:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %eax, %edx
 ; X32-NEXT:    sarl $31, %edx
@@ -352,7 +352,7 @@ define i64 @test19(i32 %x) nounwind {
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test19:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movslq %edi, %rax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function
@@ -362,14 +362,14 @@ define i64 @test19(i32 %x) nounwind {
 
 define i64 @test20(i32 %x) nounwind {
 ; X32-LABEL: test20:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:    retl
 ; X32-NEXT:    ## -- End function
 ;
 ; X64-LABEL: test20:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:    ## -- End function

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-shift.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i8 @shl_i8(i8 %a, i8 %b) {
 ; CHECK-LABEL: shl_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    shlb %cl, %dil
 ; CHECK-NEXT:    movl %edi, %eax
@@ -14,7 +14,7 @@ define i8 @shl_i8(i8 %a, i8 %b) {
 
 define i16 @shl_i16(i16 %a, i16 %b) {
 ; CHECK-LABEL: shl_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    ## kill: %cl<def> %cx<kill>
 ; CHECK-NEXT:    shlw %cl, %di
@@ -26,7 +26,7 @@ define i16 @shl_i16(i16 %a, i16 %b) {
 
 define i32 @shl_i32(i32 %a, i32 %b) {
 ; CHECK-LABEL: shl_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    ## kill: %cl<def> %ecx<kill>
 ; CHECK-NEXT:    shll %cl, %edi
@@ -38,7 +38,7 @@ define i32 @shl_i32(i32 %a, i32 %b) {
 
 define i64 @shl_i64(i64 %a, i64 %b) {
 ; CHECK-LABEL: shl_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rcx
 ; CHECK-NEXT:    ## kill: %cl<def> %rcx<kill>
 ; CHECK-NEXT:    shlq %cl, %rdi
@@ -50,7 +50,7 @@ define i64 @shl_i64(i64 %a, i64 %b) {
 
 define i8 @lshr_i8(i8 %a, i8 %b) {
 ; CHECK-LABEL: lshr_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    shrb %cl, %dil
 ; CHECK-NEXT:    movl %edi, %eax
@@ -61,7 +61,7 @@ define i8 @lshr_i8(i8 %a, i8 %b) {
 
 define i16 @lshr_i16(i16 %a, i16 %b) {
 ; CHECK-LABEL: lshr_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    ## kill: %cl<def> %cx<kill>
 ; CHECK-NEXT:    shrw %cl, %di
@@ -73,7 +73,7 @@ define i16 @lshr_i16(i16 %a, i16 %b) {
 
 define i32 @lshr_i32(i32 %a, i32 %b) {
 ; CHECK-LABEL: lshr_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    ## kill: %cl<def> %ecx<kill>
 ; CHECK-NEXT:    shrl %cl, %edi
@@ -85,7 +85,7 @@ define i32 @lshr_i32(i32 %a, i32 %b) {
 
 define i64 @lshr_i64(i64 %a, i64 %b) {
 ; CHECK-LABEL: lshr_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rcx
 ; CHECK-NEXT:    ## kill: %cl<def> %rcx<kill>
 ; CHECK-NEXT:    shrq %cl, %rdi
@@ -97,7 +97,7 @@ define i64 @lshr_i64(i64 %a, i64 %b) {
 
 define i8 @ashr_i8(i8 %a, i8 %b) {
 ; CHECK-LABEL: ashr_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    sarb %cl, %dil
 ; CHECK-NEXT:    movl %edi, %eax
@@ -108,7 +108,7 @@ define i8 @ashr_i8(i8 %a, i8 %b) {
 
 define i16 @ashr_i16(i16 %a, i16 %b) {
 ; CHECK-LABEL: ashr_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    ## kill: %cl<def> %cx<kill>
 ; CHECK-NEXT:    sarw %cl, %di
@@ -120,7 +120,7 @@ define i16 @ashr_i16(i16 %a, i16 %b) {
 
 define i32 @ashr_i32(i32 %a, i32 %b) {
 ; CHECK-LABEL: ashr_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    ## kill: %cl<def> %ecx<kill>
 ; CHECK-NEXT:    sarl %cl, %edi
@@ -132,7 +132,7 @@ define i32 @ashr_i32(i32 %a, i32 %b) {
 
 define i64 @ashr_i64(i64 %a, i64 %b) {
 ; CHECK-LABEL: ashr_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq %rsi, %rcx
 ; CHECK-NEXT:    ## kill: %cl<def> %rcx<kill>
 ; CHECK-NEXT:    sarq %cl, %rdi
@@ -144,7 +144,7 @@ define i64 @ashr_i64(i64 %a, i64 %b) {
 
 define i8 @shl_imm1_i8(i8 %a) {
 ; CHECK-LABEL: shl_imm1_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shlb $1, %dil
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -154,7 +154,7 @@ define i8 @shl_imm1_i8(i8 %a) {
 
 define i16 @shl_imm1_i16(i16 %a) {
 ; CHECK-LABEL: shl_imm1_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    leal (,%rdi,2), %eax
 ; CHECK-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -165,7 +165,7 @@ define i16 @shl_imm1_i16(i16 %a) {
 
 define i32 @shl_imm1_i32(i32 %a) {
 ; CHECK-LABEL: shl_imm1_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    leal (,%rdi,2), %eax
 ; CHECK-NEXT:    retq
@@ -175,7 +175,7 @@ define i32 @shl_imm1_i32(i32 %a) {
 
 define i64 @shl_imm1_i64(i64 %a) {
 ; CHECK-LABEL: shl_imm1_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    leaq (,%rdi,2), %rax
 ; CHECK-NEXT:    retq
   %c = shl i64 %a, 1
@@ -184,7 +184,7 @@ define i64 @shl_imm1_i64(i64 %a) {
 
 define i8 @lshr_imm1_i8(i8 %a) {
 ; CHECK-LABEL: lshr_imm1_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrb $1, %dil
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -194,7 +194,7 @@ define i8 @lshr_imm1_i8(i8 %a) {
 
 define i16 @lshr_imm1_i16(i16 %a) {
 ; CHECK-LABEL: lshr_imm1_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrw $1, %di
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -204,7 +204,7 @@ define i16 @lshr_imm1_i16(i16 %a) {
 
 define i32 @lshr_imm1_i32(i32 %a) {
 ; CHECK-LABEL: lshr_imm1_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrl $1, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -214,7 +214,7 @@ define i32 @lshr_imm1_i32(i32 %a) {
 
 define i64 @lshr_imm1_i64(i64 %a) {
 ; CHECK-LABEL: lshr_imm1_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrq $1, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq
@@ -224,7 +224,7 @@ define i64 @lshr_imm1_i64(i64 %a) {
 
 define i8 @ashr_imm1_i8(i8 %a) {
 ; CHECK-LABEL: ashr_imm1_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarb $1, %dil
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -234,7 +234,7 @@ define i8 @ashr_imm1_i8(i8 %a) {
 
 define i16 @ashr_imm1_i16(i16 %a) {
 ; CHECK-LABEL: ashr_imm1_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarw $1, %di
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -244,7 +244,7 @@ define i16 @ashr_imm1_i16(i16 %a) {
 
 define i32 @ashr_imm1_i32(i32 %a) {
 ; CHECK-LABEL: ashr_imm1_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarl $1, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -254,7 +254,7 @@ define i32 @ashr_imm1_i32(i32 %a) {
 
 define i64 @ashr_imm1_i64(i64 %a) {
 ; CHECK-LABEL: ashr_imm1_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarq $1, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq
@@ -264,7 +264,7 @@ define i64 @ashr_imm1_i64(i64 %a) {
 
 define i8 @shl_imm4_i8(i8 %a) {
 ; CHECK-LABEL: shl_imm4_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shlb $4, %dil
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -274,7 +274,7 @@ define i8 @shl_imm4_i8(i8 %a) {
 
 define i16 @shl_imm4_i16(i16 %a) {
 ; CHECK-LABEL: shl_imm4_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shlw $4, %di
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -284,7 +284,7 @@ define i16 @shl_imm4_i16(i16 %a) {
 
 define i32 @shl_imm4_i32(i32 %a) {
 ; CHECK-LABEL: shl_imm4_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shll $4, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -294,7 +294,7 @@ define i32 @shl_imm4_i32(i32 %a) {
 
 define i64 @shl_imm4_i64(i64 %a) {
 ; CHECK-LABEL: shl_imm4_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shlq $4, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq
@@ -304,7 +304,7 @@ define i64 @shl_imm4_i64(i64 %a) {
 
 define i8 @lshr_imm4_i8(i8 %a) {
 ; CHECK-LABEL: lshr_imm4_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrb $4, %dil
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -314,7 +314,7 @@ define i8 @lshr_imm4_i8(i8 %a) {
 
 define i16 @lshr_imm4_i16(i16 %a) {
 ; CHECK-LABEL: lshr_imm4_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrw $4, %di
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -324,7 +324,7 @@ define i16 @lshr_imm4_i16(i16 %a) {
 
 define i32 @lshr_imm4_i32(i32 %a) {
 ; CHECK-LABEL: lshr_imm4_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrl $4, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -334,7 +334,7 @@ define i32 @lshr_imm4_i32(i32 %a) {
 
 define i64 @lshr_imm4_i64(i64 %a) {
 ; CHECK-LABEL: lshr_imm4_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    shrq $4, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq
@@ -344,7 +344,7 @@ define i64 @lshr_imm4_i64(i64 %a) {
 
 define i8 @ashr_imm4_i8(i8 %a) {
 ; CHECK-LABEL: ashr_imm4_i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarb $4, %dil
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -354,7 +354,7 @@ define i8 @ashr_imm4_i8(i8 %a) {
 
 define i16 @ashr_imm4_i16(i16 %a) {
 ; CHECK-LABEL: ashr_imm4_i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarw $4, %di
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -364,7 +364,7 @@ define i16 @ashr_imm4_i16(i16 %a) {
 
 define i32 @ashr_imm4_i32(i32 %a) {
 ; CHECK-LABEL: ashr_imm4_i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarl $4, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -374,7 +374,7 @@ define i32 @ashr_imm4_i32(i32 %a) {
 
 define i64 @ashr_imm4_i64(i64 %a) {
 ; CHECK-LABEL: ashr_imm4_i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    sarq $4, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-store.ll Mon Dec  4 09:18:51 2017
@@ -10,13 +10,13 @@
 
 define i32 @test_store_32(i32* nocapture %addr, i32 %value) {
 ; ALL32-LABEL: test_store_32:
-; ALL32:       # BB#0: # %entry
+; ALL32:       # %bb.0: # %entry
 ; ALL32-NEXT:    movl %esi, (%rdi)
 ; ALL32-NEXT:    movl %esi, %eax
 ; ALL32-NEXT:    retq
 ;
 ; ALL64-LABEL: test_store_32:
-; ALL64:       # BB#0: # %entry
+; ALL64:       # %bb.0: # %entry
 ; ALL64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; ALL64-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; ALL64-NEXT:    movl %eax, (%ecx)
@@ -28,13 +28,13 @@ entry:
 
 define i16 @test_store_16(i16* nocapture %addr, i16 %value) {
 ; ALL32-LABEL: test_store_16:
-; ALL32:       # BB#0: # %entry
+; ALL32:       # %bb.0: # %entry
 ; ALL32-NEXT:    movw %si, (%rdi)
 ; ALL32-NEXT:    movl %esi, %eax
 ; ALL32-NEXT:    retq
 ;
 ; ALL64-LABEL: test_store_16:
-; ALL64:       # BB#0: # %entry
+; ALL64:       # %bb.0: # %entry
 ; ALL64-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; ALL64-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; ALL64-NEXT:    movw %ax, (%ecx)
@@ -46,39 +46,39 @@ entry:
 
 define <4 x i32> @test_store_4xi32(<4 x i32>* nocapture %addr, <4 x i32> %value, <4 x i32> %value2) {
 ; SSE32-LABEL: test_store_4xi32:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    paddd %xmm1, %xmm0
 ; SSE32-NEXT:    movdqu %xmm0, (%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_4xi32:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    paddd %xmm1, %xmm0
 ; SSE64-NEXT:    movdqu %xmm0, (%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_4xi32:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVXONLY32-NEXT:    vmovdqu %xmm0, (%rdi)
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_4xi32:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX64-NEXT:    vmovdqu %xmm0, (%eax)
 ; AVX64-NEXT:    retl
 ;
 ; KNL32-LABEL: test_store_4xi32:
-; KNL32:       # BB#0:
+; KNL32:       # %bb.0:
 ; KNL32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; KNL32-NEXT:    vmovdqu %xmm0, (%rdi)
 ; KNL32-NEXT:    retq
 ;
 ; SKX32-LABEL: test_store_4xi32:
-; SKX32:       # BB#0:
+; SKX32:       # %bb.0:
 ; SKX32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; SKX32-NEXT:    vmovdqu %xmm0, (%rdi)
 ; SKX32-NEXT:    retq
@@ -89,39 +89,39 @@ define <4 x i32> @test_store_4xi32(<4 x
 
 define <4 x i32> @test_store_4xi32_aligned(<4 x i32>* nocapture %addr, <4 x i32> %value, <4 x i32> %value2) {
 ; SSE32-LABEL: test_store_4xi32_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    paddd %xmm1, %xmm0
 ; SSE32-NEXT:    movdqa %xmm0, (%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_4xi32_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    paddd %xmm1, %xmm0
 ; SSE64-NEXT:    movdqa %xmm0, (%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_4xi32_aligned:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVXONLY32-NEXT:    vmovdqa %xmm0, (%rdi)
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_4xi32_aligned:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX64-NEXT:    vmovdqa %xmm0, (%eax)
 ; AVX64-NEXT:    retl
 ;
 ; KNL32-LABEL: test_store_4xi32_aligned:
-; KNL32:       # BB#0:
+; KNL32:       # %bb.0:
 ; KNL32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; KNL32-NEXT:    vmovdqa %xmm0, (%rdi)
 ; KNL32-NEXT:    retq
 ;
 ; SKX32-LABEL: test_store_4xi32_aligned:
-; SKX32:       # BB#0:
+; SKX32:       # %bb.0:
 ; SKX32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; SKX32-NEXT:    vmovdqa %xmm0, (%rdi)
 ; SKX32-NEXT:    retq
@@ -132,23 +132,23 @@ define <4 x i32> @test_store_4xi32_align
 
 define <4 x float> @test_store_4xf32(<4 x float>* nocapture %addr, <4 x float> %value) {
 ; SSE32-LABEL: test_store_4xf32:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movups %xmm0, (%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_4xf32:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    movups %xmm0, (%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_4xf32:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vmovups %xmm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_4xf32:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vmovups %xmm0, (%eax)
 ; AVX64-NEXT:    retl
@@ -158,23 +158,23 @@ define <4 x float> @test_store_4xf32(<4
 
 define <4 x float> @test_store_4xf32_aligned(<4 x float>* nocapture %addr, <4 x float> %value) {
 ; SSE32-LABEL: test_store_4xf32_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movaps %xmm0, (%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_4xf32_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    movaps %xmm0, (%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_4xf32_aligned:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vmovaps %xmm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_4xf32_aligned:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vmovaps %xmm0, (%eax)
 ; AVX64-NEXT:    retl
@@ -184,26 +184,26 @@ define <4 x float> @test_store_4xf32_ali
 
 define <2 x double> @test_store_2xf64(<2 x double>* nocapture %addr, <2 x double> %value, <2 x double> %value2) {
 ; SSE32-LABEL: test_store_2xf64:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    addpd %xmm1, %xmm0
 ; SSE32-NEXT:    movupd %xmm0, (%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_2xf64:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    addpd %xmm1, %xmm0
 ; SSE64-NEXT:    movupd %xmm0, (%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_2xf64:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX32-NEXT:    vmovupd %xmm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_2xf64:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX64-NEXT:    vmovupd %xmm0, (%eax)
@@ -215,26 +215,26 @@ define <2 x double> @test_store_2xf64(<2
 
 define <2 x double> @test_store_2xf64_aligned(<2 x double>* nocapture %addr, <2 x double> %value, <2 x double> %value2) {
 ; SSE32-LABEL: test_store_2xf64_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    addpd %xmm1, %xmm0
 ; SSE32-NEXT:    movapd %xmm0, (%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_2xf64_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    addpd %xmm1, %xmm0
 ; SSE64-NEXT:    movapd %xmm0, (%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_2xf64_aligned:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX32-NEXT:    vmovapd %xmm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_2xf64_aligned:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX64-NEXT:    vmovapd %xmm0, (%eax)
@@ -246,25 +246,25 @@ define <2 x double> @test_store_2xf64_al
 
 define <8 x i32> @test_store_8xi32(<8 x i32>* nocapture %addr, <8 x i32> %value) {
 ; SSE32-LABEL: test_store_8xi32:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movups %xmm0, (%rdi)
 ; SSE32-NEXT:    movups %xmm1, 16(%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_8xi32:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    movups %xmm0, (%eax)
 ; SSE64-NEXT:    movups %xmm1, 16(%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_8xi32:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vmovups %ymm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_8xi32:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vmovups %ymm0, (%eax)
 ; AVX64-NEXT:    retl
@@ -274,25 +274,25 @@ define <8 x i32> @test_store_8xi32(<8 x
 
 define <8 x i32> @test_store_8xi32_aligned(<8 x i32>* nocapture %addr, <8 x i32> %value) {
 ; SSE32-LABEL: test_store_8xi32_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movaps %xmm0, (%rdi)
 ; SSE32-NEXT:    movaps %xmm1, 16(%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_8xi32_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    movaps %xmm0, (%eax)
 ; SSE64-NEXT:    movaps %xmm1, 16(%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_8xi32_aligned:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vmovaps %ymm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_8xi32_aligned:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vmovaps %ymm0, (%eax)
 ; AVX64-NEXT:    retl
@@ -302,25 +302,25 @@ define <8 x i32> @test_store_8xi32_align
 
 define <8 x float> @test_store_8xf32(<8 x float>* nocapture %addr, <8 x float> %value) {
 ; SSE32-LABEL: test_store_8xf32:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movups %xmm0, (%rdi)
 ; SSE32-NEXT:    movups %xmm1, 16(%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_8xf32:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    movups %xmm0, (%eax)
 ; SSE64-NEXT:    movups %xmm1, 16(%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_8xf32:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vmovups %ymm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_8xf32:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vmovups %ymm0, (%eax)
 ; AVX64-NEXT:    retl
@@ -330,25 +330,25 @@ define <8 x float> @test_store_8xf32(<8
 
 define <8 x float> @test_store_8xf32_aligned(<8 x float>* nocapture %addr, <8 x float> %value) {
 ; SSE32-LABEL: test_store_8xf32_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movaps %xmm0, (%rdi)
 ; SSE32-NEXT:    movaps %xmm1, 16(%rdi)
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_8xf32_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE64-NEXT:    movaps %xmm0, (%eax)
 ; SSE64-NEXT:    movaps %xmm1, 16(%eax)
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_8xf32_aligned:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vmovaps %ymm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_8xf32_aligned:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vmovaps %ymm0, (%eax)
 ; AVX64-NEXT:    retl
@@ -358,7 +358,7 @@ define <8 x float> @test_store_8xf32_ali
 
 define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double> %value, <4 x double> %value2) {
 ; SSE32-LABEL: test_store_4xf64:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    addpd %xmm3, %xmm1
 ; SSE32-NEXT:    addpd %xmm2, %xmm0
 ; SSE32-NEXT:    movupd %xmm0, (%rdi)
@@ -366,7 +366,7 @@ define <4 x double> @test_store_4xf64(<4
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_4xf64:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -378,13 +378,13 @@ define <4 x double> @test_store_4xf64(<4
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_4xf64:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX32-NEXT:    vmovupd %ymm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_4xf64:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX64-NEXT:    vmovupd %ymm0, (%eax)
@@ -396,7 +396,7 @@ define <4 x double> @test_store_4xf64(<4
 
 define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 x double> %value, <4 x double> %value2) {
 ; SSE32-LABEL: test_store_4xf64_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    addpd %xmm3, %xmm1
 ; SSE32-NEXT:    addpd %xmm2, %xmm0
 ; SSE32-NEXT:    movapd %xmm0, (%rdi)
@@ -404,7 +404,7 @@ define <4 x double> @test_store_4xf64_al
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_4xf64_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -416,13 +416,13 @@ define <4 x double> @test_store_4xf64_al
 ; SSE64-NEXT:    retl
 ;
 ; AVX32-LABEL: test_store_4xf64_aligned:
-; AVX32:       # BB#0:
+; AVX32:       # %bb.0:
 ; AVX32-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX32-NEXT:    vmovapd %ymm0, (%rdi)
 ; AVX32-NEXT:    retq
 ;
 ; AVX64-LABEL: test_store_4xf64_aligned:
-; AVX64:       # BB#0:
+; AVX64:       # %bb.0:
 ; AVX64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX64-NEXT:    vmovapd %ymm0, (%eax)
@@ -434,7 +434,7 @@ define <4 x double> @test_store_4xf64_al
 
 define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %value) {
 ; SSE32-LABEL: test_store_16xi32:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movups %xmm0, (%rdi)
 ; SSE32-NEXT:    movups %xmm1, 16(%rdi)
 ; SSE32-NEXT:    movups %xmm2, 32(%rdi)
@@ -442,7 +442,7 @@ define <16 x i32> @test_store_16xi32(<16
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_16xi32:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
@@ -455,25 +455,25 @@ define <16 x i32> @test_store_16xi32(<16
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_16xi32:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vmovups %ymm0, (%rdi)
 ; AVXONLY32-NEXT:    vmovups %ymm1, 32(%rdi)
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVXONLY64-LABEL: test_store_16xi32:
-; AVXONLY64:       # BB#0:
+; AVXONLY64:       # %bb.0:
 ; AVXONLY64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVXONLY64-NEXT:    vmovups %ymm0, (%eax)
 ; AVXONLY64-NEXT:    vmovups %ymm1, 32(%eax)
 ; AVXONLY64-NEXT:    retl
 ;
 ; AVX51232-LABEL: test_store_16xi32:
-; AVX51232:       # BB#0:
+; AVX51232:       # %bb.0:
 ; AVX51232-NEXT:    vmovups %zmm0, (%rdi)
 ; AVX51232-NEXT:    retq
 ;
 ; AVX51264-LABEL: test_store_16xi32:
-; AVX51264:       # BB#0:
+; AVX51264:       # %bb.0:
 ; AVX51264-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX51264-NEXT:    vmovups %zmm0, (%eax)
 ; AVX51264-NEXT:    retl
@@ -483,7 +483,7 @@ define <16 x i32> @test_store_16xi32(<16
 
 define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x i32> %value) {
 ; SSE32-LABEL: test_store_16xi32_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movaps %xmm0, (%rdi)
 ; SSE32-NEXT:    movaps %xmm1, 16(%rdi)
 ; SSE32-NEXT:    movaps %xmm2, 32(%rdi)
@@ -491,7 +491,7 @@ define <16 x i32> @test_store_16xi32_ali
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_16xi32_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
@@ -504,25 +504,25 @@ define <16 x i32> @test_store_16xi32_ali
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_16xi32_aligned:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vmovaps %ymm0, (%rdi)
 ; AVXONLY32-NEXT:    vmovaps %ymm1, 32(%rdi)
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVXONLY64-LABEL: test_store_16xi32_aligned:
-; AVXONLY64:       # BB#0:
+; AVXONLY64:       # %bb.0:
 ; AVXONLY64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVXONLY64-NEXT:    vmovaps %ymm0, (%eax)
 ; AVXONLY64-NEXT:    vmovaps %ymm1, 32(%eax)
 ; AVXONLY64-NEXT:    retl
 ;
 ; AVX51232-LABEL: test_store_16xi32_aligned:
-; AVX51232:       # BB#0:
+; AVX51232:       # %bb.0:
 ; AVX51232-NEXT:    vmovaps %zmm0, (%rdi)
 ; AVX51232-NEXT:    retq
 ;
 ; AVX51264-LABEL: test_store_16xi32_aligned:
-; AVX51264:       # BB#0:
+; AVX51264:       # %bb.0:
 ; AVX51264-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX51264-NEXT:    vmovaps %zmm0, (%eax)
 ; AVX51264-NEXT:    retl
@@ -532,7 +532,7 @@ define <16 x i32> @test_store_16xi32_ali
 
 define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x float> %value) {
 ; SSE32-LABEL: test_store_16xf32:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movups %xmm0, (%rdi)
 ; SSE32-NEXT:    movups %xmm1, 16(%rdi)
 ; SSE32-NEXT:    movups %xmm2, 32(%rdi)
@@ -540,7 +540,7 @@ define <16 x float> @test_store_16xf32(<
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_16xf32:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
@@ -553,25 +553,25 @@ define <16 x float> @test_store_16xf32(<
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_16xf32:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vmovups %ymm0, (%rdi)
 ; AVXONLY32-NEXT:    vmovups %ymm1, 32(%rdi)
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVXONLY64-LABEL: test_store_16xf32:
-; AVXONLY64:       # BB#0:
+; AVXONLY64:       # %bb.0:
 ; AVXONLY64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVXONLY64-NEXT:    vmovups %ymm0, (%eax)
 ; AVXONLY64-NEXT:    vmovups %ymm1, 32(%eax)
 ; AVXONLY64-NEXT:    retl
 ;
 ; AVX51232-LABEL: test_store_16xf32:
-; AVX51232:       # BB#0:
+; AVX51232:       # %bb.0:
 ; AVX51232-NEXT:    vmovups %zmm0, (%rdi)
 ; AVX51232-NEXT:    retq
 ;
 ; AVX51264-LABEL: test_store_16xf32:
-; AVX51264:       # BB#0:
+; AVX51264:       # %bb.0:
 ; AVX51264-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX51264-NEXT:    vmovups %zmm0, (%eax)
 ; AVX51264-NEXT:    retl
@@ -581,7 +581,7 @@ define <16 x float> @test_store_16xf32(<
 
 define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <16 x float> %value) {
 ; SSE32-LABEL: test_store_16xf32_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    movaps %xmm0, (%rdi)
 ; SSE32-NEXT:    movaps %xmm1, 16(%rdi)
 ; SSE32-NEXT:    movaps %xmm2, 32(%rdi)
@@ -589,7 +589,7 @@ define <16 x float> @test_store_16xf32_a
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_16xf32_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movaps {{[0-9]+}}(%esp), %xmm3
@@ -602,25 +602,25 @@ define <16 x float> @test_store_16xf32_a
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_16xf32_aligned:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vmovaps %ymm0, (%rdi)
 ; AVXONLY32-NEXT:    vmovaps %ymm1, 32(%rdi)
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVXONLY64-LABEL: test_store_16xf32_aligned:
-; AVXONLY64:       # BB#0:
+; AVXONLY64:       # %bb.0:
 ; AVXONLY64-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVXONLY64-NEXT:    vmovaps %ymm0, (%eax)
 ; AVXONLY64-NEXT:    vmovaps %ymm1, 32(%eax)
 ; AVXONLY64-NEXT:    retl
 ;
 ; AVX51232-LABEL: test_store_16xf32_aligned:
-; AVX51232:       # BB#0:
+; AVX51232:       # %bb.0:
 ; AVX51232-NEXT:    vmovaps %zmm0, (%rdi)
 ; AVX51232-NEXT:    retq
 ;
 ; AVX51264-LABEL: test_store_16xf32_aligned:
-; AVX51264:       # BB#0:
+; AVX51264:       # %bb.0:
 ; AVX51264-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX51264-NEXT:    vmovaps %zmm0, (%eax)
 ; AVX51264-NEXT:    retl
@@ -630,7 +630,7 @@ define <16 x float> @test_store_16xf32_a
 
 define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double> %value, <8 x double> %value2) {
 ; SSE32-LABEL: test_store_8xf64:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    addpd %xmm7, %xmm3
 ; SSE32-NEXT:    addpd %xmm6, %xmm2
 ; SSE32-NEXT:    addpd %xmm5, %xmm1
@@ -642,7 +642,7 @@ define <8 x double> @test_store_8xf64(<8
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_8xf64:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movapd {{[0-9]+}}(%esp), %xmm3
@@ -659,7 +659,7 @@ define <8 x double> @test_store_8xf64(<8
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_8xf64:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
 ; AVXONLY32-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
 ; AVXONLY32-NEXT:    vmovupd %ymm0, (%rdi)
@@ -667,7 +667,7 @@ define <8 x double> @test_store_8xf64(<8
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVXONLY64-LABEL: test_store_8xf64:
-; AVXONLY64:       # BB#0:
+; AVXONLY64:       # %bb.0:
 ; AVXONLY64-NEXT:    pushl %ebp
 ; AVXONLY64-NEXT:    .cfi_def_cfa_offset 8
 ; AVXONLY64-NEXT:    .cfi_offset %ebp, -8
@@ -685,13 +685,13 @@ define <8 x double> @test_store_8xf64(<8
 ; AVXONLY64-NEXT:    retl
 ;
 ; AVX51232-LABEL: test_store_8xf64:
-; AVX51232:       # BB#0:
+; AVX51232:       # %bb.0:
 ; AVX51232-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
 ; AVX51232-NEXT:    vmovupd %zmm0, (%rdi)
 ; AVX51232-NEXT:    retq
 ;
 ; AVX51264-LABEL: test_store_8xf64:
-; AVX51264:       # BB#0:
+; AVX51264:       # %bb.0:
 ; AVX51264-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX51264-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
 ; AVX51264-NEXT:    vmovupd %zmm0, (%eax)
@@ -703,7 +703,7 @@ define <8 x double> @test_store_8xf64(<8
 
 define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 x double> %value, <8 x double> %value2) {
 ; SSE32-LABEL: test_store_8xf64_aligned:
-; SSE32:       # BB#0:
+; SSE32:       # %bb.0:
 ; SSE32-NEXT:    addpd %xmm7, %xmm3
 ; SSE32-NEXT:    addpd %xmm6, %xmm2
 ; SSE32-NEXT:    addpd %xmm5, %xmm1
@@ -715,7 +715,7 @@ define <8 x double> @test_store_8xf64_al
 ; SSE32-NEXT:    retq
 ;
 ; SSE64-LABEL: test_store_8xf64_aligned:
-; SSE64:       # BB#0:
+; SSE64:       # %bb.0:
 ; SSE64-NEXT:    subl $12, %esp
 ; SSE64-NEXT:    .cfi_def_cfa_offset 16
 ; SSE64-NEXT:    movapd {{[0-9]+}}(%esp), %xmm3
@@ -732,7 +732,7 @@ define <8 x double> @test_store_8xf64_al
 ; SSE64-NEXT:    retl
 ;
 ; AVXONLY32-LABEL: test_store_8xf64_aligned:
-; AVXONLY32:       # BB#0:
+; AVXONLY32:       # %bb.0:
 ; AVXONLY32-NEXT:    vaddpd %ymm3, %ymm1, %ymm1
 ; AVXONLY32-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
 ; AVXONLY32-NEXT:    vmovapd %ymm0, (%rdi)
@@ -740,7 +740,7 @@ define <8 x double> @test_store_8xf64_al
 ; AVXONLY32-NEXT:    retq
 ;
 ; AVXONLY64-LABEL: test_store_8xf64_aligned:
-; AVXONLY64:       # BB#0:
+; AVXONLY64:       # %bb.0:
 ; AVXONLY64-NEXT:    pushl %ebp
 ; AVXONLY64-NEXT:    .cfi_def_cfa_offset 8
 ; AVXONLY64-NEXT:    .cfi_offset %ebp, -8
@@ -758,13 +758,13 @@ define <8 x double> @test_store_8xf64_al
 ; AVXONLY64-NEXT:    retl
 ;
 ; AVX51232-LABEL: test_store_8xf64_aligned:
-; AVX51232:       # BB#0:
+; AVX51232:       # %bb.0:
 ; AVX51232-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
 ; AVX51232-NEXT:    vmovapd %zmm0, (%rdi)
 ; AVX51232-NEXT:    retq
 ;
 ; AVX51264-LABEL: test_store_8xf64_aligned:
-; AVX51264:       # BB#0:
+; AVX51264:       # %bb.0:
 ; AVX51264-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX51264-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
 ; AVX51264-NEXT:    vmovapd %zmm0, (%eax)

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll Mon Dec  4 09:18:51 2017
@@ -9,22 +9,22 @@
 
 define <16 x i8> @test_v16i8(<16 x i8>* %V) {
 ; SSE-LABEL: test_v16i8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i8:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v16i8:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i8:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -34,22 +34,22 @@ entry:
 
 define <8 x i16> @test_v8i16(<8 x i16>* %V) {
 ; SSE-LABEL: test_v8i16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i16:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v8i16:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i16:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -59,22 +59,22 @@ entry:
 
 define <4 x i32> @test_v4i32(<4 x i32>* %V) {
 ; SSE-LABEL: test_v4i32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v4i32:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v4i32:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v4i32:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -84,22 +84,22 @@ entry:
 
 define <2 x i64> @test_v2i64(<2 x i64>* %V) {
 ; SSE-LABEL: test_v2i64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v2i64:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v2i64:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v2i64:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -109,22 +109,22 @@ entry:
 
 define <16 x i8> @test_v16i8_unaligned(<16 x i8>* %V) {
 ; SSE-LABEL: test_v16i8_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqu (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i8_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v16i8_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i8_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -134,22 +134,22 @@ entry:
 
 define <8 x i16> @test_v8i16_unaligned(<8 x i16>* %V) {
 ; SSE-LABEL: test_v8i16_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqu (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i16_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v8i16_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i16_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -159,22 +159,22 @@ entry:
 
 define <4 x i32> @test_v4i32_unaligned(<4 x i32>* %V) {
 ; SSE-LABEL: test_v4i32_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqu (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v4i32_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v4i32_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v4i32_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -184,22 +184,22 @@ entry:
 
 define <2 x i64> @test_v2i64_unaligned(<2 x i64>* %V) {
 ; SSE-LABEL: test_v2i64_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqu (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v2i64_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v2i64_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v2i64_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -209,12 +209,12 @@ entry:
 
 define <4 x float> @test_v4f32(<4 x float>* %V) {
 ; SSE-LABEL: test_v4f32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovaps (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -224,12 +224,12 @@ entry:
 
 define <2 x double> @test_v2f64(<2 x double>* %V) {
 ; SSE-LABEL: test_v2f64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movapd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovapd (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -239,12 +239,12 @@ entry:
 
 define <4 x float> @test_v4f32_unaligned(<4 x float>* %V) {
 ; SSE-LABEL: test_v4f32_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f32_unaligned:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovups (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -254,12 +254,12 @@ entry:
 
 define <2 x double> @test_v2f64_unaligned(<2 x double>* %V) {
 ; SSE-LABEL: test_v2f64_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movupd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f64_unaligned:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovupd (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -269,22 +269,22 @@ entry:
 
 define <16 x i8> @test_v16i8_abi_alignment(<16 x i8>* %V) {
 ; SSE-LABEL: test_v16i8_abi_alignment:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i8_abi_alignment:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v16i8_abi_alignment:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i8_abi_alignment:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -294,22 +294,22 @@ entry:
 
 define <8 x i16> @test_v8i16_abi_alignment(<8 x i16>* %V) {
 ; SSE-LABEL: test_v8i16_abi_alignment:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i16_abi_alignment:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v8i16_abi_alignment:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i16_abi_alignment:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -319,22 +319,22 @@ entry:
 
 define <4 x i32> @test_v4i32_abi_alignment(<4 x i32>* %V) {
 ; SSE-LABEL: test_v4i32_abi_alignment:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v4i32_abi_alignment:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v4i32_abi_alignment:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v4i32_abi_alignment:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -344,22 +344,22 @@ entry:
 
 define <2 x i64> @test_v2i64_abi_alignment(<2 x i64>* %V) {
 ; SSE-LABEL: test_v2i64_abi_alignment:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movdqa (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v2i64_abi_alignment:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v2i64_abi_alignment:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v2i64_abi_alignment:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %xmm0
 ; SKX-NEXT:    retq
 entry:
@@ -369,12 +369,12 @@ entry:
 
 define <4 x float> @test_v4f32_abi_alignment(<4 x float>* %V) {
 ; SSE-LABEL: test_v4f32_abi_alignment:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f32_abi_alignment:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovaps (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -384,12 +384,12 @@ entry:
 
 define <2 x double> @test_v2f64_abi_alignment(<2 x double>* %V) {
 ; SSE-LABEL: test_v2f64_abi_alignment:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movapd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f64_abi_alignment:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovapd (%rdi), %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -399,23 +399,23 @@ entry:
 
 define <32 x i8> @test_v32i8(<32 x i8>* %V) {
 ; SSE-LABEL: test_v32i8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v32i8:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v32i8:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v32i8:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -425,23 +425,23 @@ entry:
 
 define <16 x i16> @test_v16i16(<16 x i16>* %V) {
 ; SSE-LABEL: test_v16i16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i16:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v16i16:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i16:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -451,23 +451,23 @@ entry:
 
 define <8 x i32> @test_v8i32(<8 x i32>* %V) {
 ; SSE-LABEL: test_v8i32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i32:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v8i32:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i32:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -477,23 +477,23 @@ entry:
 
 define <4 x i64> @test_v4i64(<4 x i64>* %V) {
 ; SSE-LABEL: test_v4i64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v4i64:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v4i64:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqa (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v4i64:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -503,23 +503,23 @@ entry:
 
 define <32 x i8> @test_v32i8_unaligned(<32 x i8>* %V) {
 ; SSE-LABEL: test_v32i8_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v32i8_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v32i8_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v32i8_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -529,23 +529,23 @@ entry:
 
 define <16 x i16> @test_v16i16_unaligned(<16 x i16>* %V) {
 ; SSE-LABEL: test_v16i16_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i16_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v16i16_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i16_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -555,23 +555,23 @@ entry:
 
 define <8 x i32> @test_v8i32_unaligned(<8 x i32>* %V) {
 ; SSE-LABEL: test_v8i32_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i32_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v8i32_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i32_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -581,23 +581,23 @@ entry:
 
 define <4 x i64> @test_v4i64_unaligned(<4 x i64>* %V) {
 ; SSE-LABEL: test_v4i64_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v4i64_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v4i64_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovdqu (%rdi), %ymm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v4i64_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %ymm0
 ; SKX-NEXT:    retq
 entry:
@@ -607,13 +607,13 @@ entry:
 
 define <8 x float> @test_v8f32(<8 x float>* %V) {
 ; SSE-LABEL: test_v8f32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8f32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -623,13 +623,13 @@ entry:
 
 define <4 x double> @test_v4f64(<4 x double>* %V) {
 ; SSE-LABEL: test_v4f64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movapd (%rdi), %xmm0
 ; SSE-NEXT:    movapd 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f64:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -639,13 +639,13 @@ entry:
 
 define <8 x float> @test_v8f32_unaligned(<8 x float>* %V) {
 ; SSE-LABEL: test_v8f32_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8f32_unaligned:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovups (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -655,13 +655,13 @@ entry:
 
 define <4 x double> @test_v4f64_unaligned(<4 x double>* %V) {
 ; SSE-LABEL: test_v4f64_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movupd (%rdi), %xmm0
 ; SSE-NEXT:    movupd 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f64_unaligned:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovupd (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -671,7 +671,7 @@ entry:
 
 define <64 x i8> @test_v64i8(<64 x i8>* %V) {
 ; SSE-LABEL: test_v64i8:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    movaps 32(%rdi), %xmm2
@@ -679,19 +679,19 @@ define <64 x i8> @test_v64i8(<64 x i8>*
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v64i8:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovaps (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v64i8:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovaps (%rdi), %ymm0
 ; KNL-NEXT:    vmovaps 32(%rdi), %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v64i8:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; SKX-NEXT:    retq
 entry:
@@ -701,7 +701,7 @@ entry:
 
 define <32 x i16> @test_v32i16(<32 x i16>* %V) {
 ; SSE-LABEL: test_v32i16:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    movaps 32(%rdi), %xmm2
@@ -709,19 +709,19 @@ define <32 x i16> @test_v32i16(<32 x i16
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v32i16:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovaps (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v32i16:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovaps (%rdi), %ymm0
 ; KNL-NEXT:    vmovaps 32(%rdi), %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v32i16:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; SKX-NEXT:    retq
 entry:
@@ -731,7 +731,7 @@ entry:
 
 define <16 x i32> @test_v16i32(<16 x i32>* %V) {
 ; SSE-LABEL: test_v16i32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    movaps 32(%rdi), %xmm2
@@ -739,13 +739,13 @@ define <16 x i32> @test_v16i32(<16 x i32
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i32:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovaps (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16i32:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -755,7 +755,7 @@ entry:
 
 define <8 x i64> @test_v8i64(<8 x i64>* %V) {
 ; SSE-LABEL: test_v8i64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    movaps 32(%rdi), %xmm2
@@ -763,13 +763,13 @@ define <8 x i64> @test_v8i64(<8 x i64>*
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i64:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovaps (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8i64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -779,7 +779,7 @@ entry:
 
 define <64 x i8> @test_v64i8_unaligned(<64 x i8>* %V) {
 ; SSE-LABEL: test_v64i8_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    movups 32(%rdi), %xmm2
@@ -787,19 +787,19 @@ define <64 x i8> @test_v64i8_unaligned(<
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v64i8_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovups (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovups 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v64i8_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovups (%rdi), %ymm0
 ; KNL-NEXT:    vmovups 32(%rdi), %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v64i8_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; SKX-NEXT:    retq
 entry:
@@ -809,7 +809,7 @@ entry:
 
 define <32 x i16> @test_v32i16_unaligned(<32 x i16>* %V) {
 ; SSE-LABEL: test_v32i16_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    movups 32(%rdi), %xmm2
@@ -817,19 +817,19 @@ define <32 x i16> @test_v32i16_unaligned
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v32i16_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovups (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovups 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; KNL-LABEL: test_v32i16_unaligned:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vmovups (%rdi), %ymm0
 ; KNL-NEXT:    vmovups 32(%rdi), %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v32i16_unaligned:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; SKX-NEXT:    retq
 entry:
@@ -839,7 +839,7 @@ entry:
 
 define <16 x i32> @test_v16i32_unaligned(<16 x i32>* %V) {
 ; SSE-LABEL: test_v16i32_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    movups 32(%rdi), %xmm2
@@ -847,13 +847,13 @@ define <16 x i32> @test_v16i32_unaligned
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16i32_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovups (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovups 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16i32_unaligned:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -863,7 +863,7 @@ entry:
 
 define <8 x i64> @test_v8i64_unaligned(<8 x i64>* %V) {
 ; SSE-LABEL: test_v8i64_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    movups 32(%rdi), %xmm2
@@ -871,13 +871,13 @@ define <8 x i64> @test_v8i64_unaligned(<
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8i64_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovups (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovups 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8i64_unaligned:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -887,13 +887,13 @@ entry:
 
 define <8 x float> @test_v16f32(<8 x float>* %V) {
 ; SSE-LABEL: test_v16f32:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movaps (%rdi), %xmm0
 ; SSE-NEXT:    movaps 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16f32:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX-NEXT:    retq
 entry:
@@ -903,7 +903,7 @@ entry:
 
 define <8 x double> @test_v8f64(<8 x double>* %V) {
 ; SSE-LABEL: test_v8f64:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movapd (%rdi), %xmm0
 ; SSE-NEXT:    movapd 16(%rdi), %xmm1
 ; SSE-NEXT:    movapd 32(%rdi), %xmm2
@@ -911,13 +911,13 @@ define <8 x double> @test_v8f64(<8 x dou
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8f64:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovapd (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovapd 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovapd (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -927,7 +927,7 @@ entry:
 
 define <16 x float> @test_v16f32_unaligned(<16 x float>* %V) {
 ; SSE-LABEL: test_v16f32_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    movups 16(%rdi), %xmm1
 ; SSE-NEXT:    movups 32(%rdi), %xmm2
@@ -935,13 +935,13 @@ define <16 x float> @test_v16f32_unalign
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v16f32_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovups (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovups 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32_unaligned:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovups (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
@@ -951,7 +951,7 @@ entry:
 
 define <8 x double> @test_v8f64_unaligned(<8 x double>* %V) {
 ; SSE-LABEL: test_v8f64_unaligned:
-; SSE:       # BB#0: # %entry
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movupd (%rdi), %xmm0
 ; SSE-NEXT:    movupd 16(%rdi), %xmm1
 ; SSE-NEXT:    movupd 32(%rdi), %xmm2
@@ -959,13 +959,13 @@ define <8 x double> @test_v8f64_unaligne
 ; SSE-NEXT:    retq
 ;
 ; AVXONLY-LABEL: test_v8f64_unaligned:
-; AVXONLY:       # BB#0: # %entry
+; AVXONLY:       # %bb.0: # %entry
 ; AVXONLY-NEXT:    vmovupd (%rdi), %ymm0
 ; AVXONLY-NEXT:    vmovupd 32(%rdi), %ymm1
 ; AVXONLY-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64_unaligned:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovupd (%rdi), %zmm0
 ; AVX512-NEXT:    retq
 entry:
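
(For readers skimming these hunks: every change in this file, as in the rest of the patch, rewrites the autogenerated FileCheck expectation for the basic-block comment that llc prints. A minimal hand-written example of the idiom, illustrative only and not taken from this patch; the function @twice, its body, and the RUN triple are assumptions for the sketch:

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
  define double @twice(double %x) {
  ; CHECK-LABEL: twice:
  ; CHECK:       # %bb.0:
  ; CHECK-NEXT:    addsd %xmm0, %xmm0
  ; CHECK-NEXT:    retq
    %r = fadd double %x, %x
    ret double %r
  }

CHECK-LABEL anchors FileCheck at the function's label, the bare CHECK matches the new %bb.0 block comment, and each CHECK-NEXT must match the immediately following line of llc's output, which is why changing the comment format touches so many lines.)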

Modified: llvm/trunk/test/CodeGen/X86/fastisel-softfloat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fastisel-softfloat.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fastisel-softfloat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fastisel-softfloat.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gn
 
 define float @pr26522(float %pat) #0 {
 ; CHECK-LABEL: pr26522:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   ret float %pat
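
(The %bb.N spelling is the one MIR already uses to reference blocks. A minimal hand-written MIR fragment, illustrative only and not from this patch, with assumed block names entry/then, showing a block definition and %bb.N references in its successor list:

  bb.0.entry:
    successors: %bb.1, %bb.2
    ...
  bb.1.then:

After this patch, debug printing of machine basic blocks uses the same %bb.N form, so a block reference in a debug dump and in a .mir file now reads as the same token.)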

Modified: llvm/trunk/test/CodeGen/X86/fdiv-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fdiv-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fdiv-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fdiv-combine.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define float @div1_arcp(float %x, float %y, float %z) {
 ; CHECK-LABEL: div1_arcp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divss %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %div1 = fdiv arcp float %x, %y
@@ -18,7 +18,7 @@ define float @div1_arcp(float %x, float
 
 define float @div2_arcp_all(float %x, float %y, float %z) {
 ; CHECK-LABEL: div2_arcp_all:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    divss %xmm2, %xmm3
 ; CHECK-NEXT:    mulss %xmm3, %xmm0
@@ -35,7 +35,7 @@ define float @div2_arcp_all(float %x, fl
 
 define float @div2_arcp_partial1(float %x, float %y, float %z) {
 ; CHECK-LABEL: div2_arcp_partial1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divss %xmm2, %xmm0
 ; CHECK-NEXT:    mulss %xmm1, %xmm0
 ; CHECK-NEXT:    divss %xmm2, %xmm0
@@ -50,7 +50,7 @@ define float @div2_arcp_partial1(float %
 
 define float @div2_arcp_partial2(float %x, float %y, float %z) {
 ; CHECK-LABEL: div2_arcp_partial2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divss %xmm2, %xmm0
 ; CHECK-NEXT:    mulss %xmm1, %xmm0
 ; CHECK-NEXT:    divss %xmm2, %xmm0
@@ -65,7 +65,7 @@ define float @div2_arcp_partial2(float %
 
 define float @div2_arcp_partial3(float %x, float %y, float %z) {
 ; CHECK-LABEL: div2_arcp_partial3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    divss %xmm2, %xmm3
 ; CHECK-NEXT:    mulss %xmm3, %xmm0
@@ -83,7 +83,7 @@ define float @div2_arcp_partial3(float %
 
 define double @div3_arcp(double %x, double %y, double %z) {
 ; CHECK-LABEL: div3_arcp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsd{{.*#+}} xmm2 = mem[0],zero
 ; CHECK-NEXT:    divsd %xmm1, %xmm2
 ; CHECK-NEXT:    mulsd %xmm2, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/fdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fdiv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fdiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fdiv.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 define double @exact(double %x) {
 ; Exact division by a constant converted to multiplication.
 ; CHECK-LABEL: exact:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulsd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %div = fdiv double %x, 2.0
@@ -14,7 +14,7 @@ define double @exact(double %x) {
 define double @inexact(double %x) {
 ; Inexact division by a constant converted to multiplication.
 ; CHECK-LABEL: inexact:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulsd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %div = fdiv double %x, 0x41DFFFFFFFC00000
@@ -24,7 +24,7 @@ define double @inexact(double %x) {
 define double @funky(double %x) {
 ; No conversion to multiplication if too funky.
 ; CHECK-LABEL: funky:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorpd %xmm1, %xmm1
 ; CHECK-NEXT:    divsd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -35,7 +35,7 @@ define double @funky(double %x) {
 define double @denormal1(double %x) {
 ; Don't generate multiplication by a denormal.
 ; CHECK-LABEL: denormal1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divsd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %div = fdiv double %x, 0x7FD0000000000001
@@ -45,7 +45,7 @@ define double @denormal1(double %x) {
 define double @denormal2(double %x) {
 ; Don't generate multiplication by a denormal.
 ; CHECK-LABEL: denormal2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divsd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %div = fdiv double %x, 0x7FEFFFFFFFFFFFFF
@@ -56,7 +56,7 @@ define double @denormal2(double %x) {
 
 define float @double_negative(float %x, float %y) #0 {
 ; CHECK-LABEL: double_negative:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    divss %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %neg1 = fsub float -0.0, %x
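
(A note on the "exact" and "denormal" cases above, illustrative reasoning only: 1/2.0 is 0.5, which is exactly representable in binary64, so the division can be rewritten as a multiplication with bit-identical results, hence the expected mulsd. A hand-written sketch, not part of this patch; the function name @half is an assumption:

  define double @half(double %x) {
    ; 1/2.0 == 0.5 is exact in binary64, so llc may emit
    ; mulsd with a 0.5 constant instead of divsd.
    %div = fdiv double %x, 2.0
    ret double %div
  }

The denormal1/denormal2 cases keep divsd because the reciprocal of such a large divisor would be a denormal constant, and multiplying by a denormal does not faithfully replace the division.)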

Modified: llvm/trunk/test/CodeGen/X86/fixup-bw-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fixup-bw-copy.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fixup-bw-copy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fixup-bw-copy.ll Mon Dec  4 09:18:51 2017
@@ -8,17 +8,17 @@ target datalayout = "e-m:o-p:32:32-f64:3
 
 define i8 @test_movb(i8 %a0) {
 ; BWON64-LABEL: test_movb:
-; BWON64:       # BB#0:
+; BWON64:       # %bb.0:
 ; BWON64-NEXT:    movl %edi, %eax
 ; BWON64-NEXT:    retq
 ;
 ; BWOFF64-LABEL: test_movb:
-; BWOFF64:       # BB#0:
+; BWOFF64:       # %bb.0:
 ; BWOFF64-NEXT:    movb %dil, %al
 ; BWOFF64-NEXT:    retq
 ;
 ; X32-LABEL: test_movb:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    retl
   ret i8 %a0
@@ -26,22 +26,22 @@ define i8 @test_movb(i8 %a0) {
 
 define i16 @test_movw(i16 %a0) {
 ; BWON64-LABEL: test_movw:
-; BWON64:       # BB#0:
+; BWON64:       # %bb.0:
 ; BWON64-NEXT:    movl %edi, %eax
 ; BWON64-NEXT:    retq
 ;
 ; BWOFF64-LABEL: test_movw:
-; BWOFF64:       # BB#0:
+; BWOFF64:       # %bb.0:
 ; BWOFF64-NEXT:    movw %di, %ax
 ; BWOFF64-NEXT:    retq
 ;
 ; BWON32-LABEL: test_movw:
-; BWON32:       # BB#0:
+; BWON32:       # %bb.0:
 ; BWON32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; BWON32-NEXT:    retl
 ;
 ; BWOFF32-LABEL: test_movw:
-; BWOFF32:       # BB#0:
+; BWOFF32:       # %bb.0:
 ; BWOFF32-NEXT:    movw {{[0-9]+}}(%esp), %ax
 ; BWOFF32-NEXT:    retl
   ret i16 %a0
@@ -50,7 +50,7 @@ define i16 @test_movw(i16 %a0) {
 ; Verify we don't mess with H-reg copies (only generated in 32-bit mode).
 define i8 @test_movb_hreg(i16 %a0) {
 ; X64-LABEL: test_movb_hreg:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    shrl $8, %eax
 ; X64-NEXT:    addb %dil, %al
@@ -58,7 +58,7 @@ define i8 @test_movb_hreg(i16 %a0) {
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_movb_hreg:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addb %al, %ah
 ; X32-NEXT:    movb %ah, %al

Modified: llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ attributes #0 = { nounwind }
 declare <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_baa_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm1
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfmadd213ss %xmm1, %xmm1, %xmm0
@@ -19,7 +19,7 @@ define <4 x float> @test_x86_fmadd_baa_s
 
 define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_aba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfmadd132ss (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -29,7 +29,7 @@ define <4 x float> @test_x86_fmadd_aba_s
 
 define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_bba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfmadd213ss (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -40,7 +40,7 @@ define <4 x float> @test_x86_fmadd_bba_s
 declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_baa_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfmadd132ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -50,7 +50,7 @@ define <4 x float> @test_x86_fmadd_baa_p
 
 define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_aba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfmadd231ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -60,7 +60,7 @@ define <4 x float> @test_x86_fmadd_aba_p
 
 define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_bba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfmadd213ps (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -71,7 +71,7 @@ define <4 x float> @test_x86_fmadd_bba_p
 declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_baa_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfmadd132ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -81,7 +81,7 @@ define <8 x float> @test_x86_fmadd_baa_p
 
 define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_aba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfmadd231ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -91,7 +91,7 @@ define <8 x float> @test_x86_fmadd_aba_p
 
 define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_bba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA-NEXT:    vfmadd213ps (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -102,7 +102,7 @@ define <8 x float> @test_x86_fmadd_bba_p
 declare <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_baa_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm1
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfmadd213sd %xmm1, %xmm1, %xmm0
@@ -113,7 +113,7 @@ define <2 x double> @test_x86_fmadd_baa_
 
 define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_aba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfmadd132sd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -123,7 +123,7 @@ define <2 x double> @test_x86_fmadd_aba_
 
 define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_bba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfmadd213sd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -134,7 +134,7 @@ define <2 x double> @test_x86_fmadd_bba_
 declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_baa_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfmadd132pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -144,7 +144,7 @@ define <2 x double> @test_x86_fmadd_baa_
 
 define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_aba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfmadd231pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -154,7 +154,7 @@ define <2 x double> @test_x86_fmadd_aba_
 
 define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_bba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfmadd213pd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -165,7 +165,7 @@ define <2 x double> @test_x86_fmadd_bba_
 declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_baa_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfmadd132pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -175,7 +175,7 @@ define <4 x double> @test_x86_fmadd_baa_
 
 define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_aba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfmadd231pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -185,7 +185,7 @@ define <4 x double> @test_x86_fmadd_aba_
 
 define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmadd_bba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA-NEXT:    vfmadd213pd (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -197,7 +197,7 @@ define <4 x double> @test_x86_fmadd_bba_
 declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fnmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_baa_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm1
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfnmadd213ss %xmm1, %xmm1, %xmm0
@@ -208,7 +208,7 @@ define <4 x float> @test_x86_fnmadd_baa_
 
 define <4 x float> @test_x86_fnmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_aba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfnmadd132ss (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -218,7 +218,7 @@ define <4 x float> @test_x86_fnmadd_aba_
 
 define <4 x float> @test_x86_fnmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_bba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfnmadd213ss (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -229,7 +229,7 @@ define <4 x float> @test_x86_fnmadd_bba_
 declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_baa_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfnmadd132ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -239,7 +239,7 @@ define <4 x float> @test_x86_fnmadd_baa_
 
 define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_aba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfnmadd231ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -249,7 +249,7 @@ define <4 x float> @test_x86_fnmadd_aba_
 
 define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_bba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfnmadd213ps (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -260,7 +260,7 @@ define <4 x float> @test_x86_fnmadd_bba_
 declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_baa_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfnmadd132ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -270,7 +270,7 @@ define <8 x float> @test_x86_fnmadd_baa_
 
 define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_aba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfnmadd231ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -280,7 +280,7 @@ define <8 x float> @test_x86_fnmadd_aba_
 
 define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_bba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA-NEXT:    vfnmadd213ps (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -291,7 +291,7 @@ define <8 x float> @test_x86_fnmadd_bba_
 declare <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fnmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_baa_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm1
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfnmadd213sd %xmm1, %xmm1, %xmm0
@@ -302,7 +302,7 @@ define <2 x double> @test_x86_fnmadd_baa
 
 define <2 x double> @test_x86_fnmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_aba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfnmadd132sd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -312,7 +312,7 @@ define <2 x double> @test_x86_fnmadd_aba
 
 define <2 x double> @test_x86_fnmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_bba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfnmadd213sd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -323,7 +323,7 @@ define <2 x double> @test_x86_fnmadd_bba
 declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_baa_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfnmadd132pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -333,7 +333,7 @@ define <2 x double> @test_x86_fnmadd_baa
 
 define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_aba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfnmadd231pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -343,7 +343,7 @@ define <2 x double> @test_x86_fnmadd_aba
 
 define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_bba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfnmadd213pd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -354,7 +354,7 @@ define <2 x double> @test_x86_fnmadd_bba
 declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_baa_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfnmadd132pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -364,7 +364,7 @@ define <4 x double> @test_x86_fnmadd_baa
 
 define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_aba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfnmadd231pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -374,7 +374,7 @@ define <4 x double> @test_x86_fnmadd_aba
 
 define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmadd_bba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA-NEXT:    vfnmadd213pd (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -385,7 +385,7 @@ define <4 x double> @test_x86_fnmadd_bba
 declare <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmsub_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_baa_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm1
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfmsub213ss %xmm1, %xmm1, %xmm0
@@ -396,7 +396,7 @@ define <4 x float> @test_x86_fmsub_baa_s
 
 define <4 x float> @test_x86_fmsub_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_aba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfmsub132ss (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -406,7 +406,7 @@ define <4 x float> @test_x86_fmsub_aba_s
 
 define <4 x float> @test_x86_fmsub_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_bba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfmsub213ss (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -417,7 +417,7 @@ define <4 x float> @test_x86_fmsub_bba_s
 declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_baa_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfmsub132ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -427,7 +427,7 @@ define <4 x float> @test_x86_fmsub_baa_p
 
 define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_aba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfmsub231ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -437,7 +437,7 @@ define <4 x float> @test_x86_fmsub_aba_p
 
 define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_bba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfmsub213ps (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -448,7 +448,7 @@ define <4 x float> @test_x86_fmsub_bba_p
 declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_baa_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfmsub132ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -458,7 +458,7 @@ define <8 x float> @test_x86_fmsub_baa_p
 
 define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_aba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfmsub231ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -468,7 +468,7 @@ define <8 x float> @test_x86_fmsub_aba_p
 
 define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_bba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA-NEXT:    vfmsub213ps (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -479,7 +479,7 @@ define <8 x float> @test_x86_fmsub_bba_p
 declare <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmsub_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_baa_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm1
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfmsub213sd %xmm1, %xmm1, %xmm0
@@ -490,7 +490,7 @@ define <2 x double> @test_x86_fmsub_baa_
 
 define <2 x double> @test_x86_fmsub_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_aba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfmsub132sd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -500,7 +500,7 @@ define <2 x double> @test_x86_fmsub_aba_
 
 define <2 x double> @test_x86_fmsub_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_bba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfmsub213sd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -511,7 +511,7 @@ define <2 x double> @test_x86_fmsub_bba_
 declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_baa_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfmsub132pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -521,7 +521,7 @@ define <2 x double> @test_x86_fmsub_baa_
 
 define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_aba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfmsub231pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -531,7 +531,7 @@ define <2 x double> @test_x86_fmsub_aba_
 
 define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_bba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfmsub213pd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -542,7 +542,7 @@ define <2 x double> @test_x86_fmsub_bba_
 declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_baa_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfmsub132pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -552,7 +552,7 @@ define <4 x double> @test_x86_fmsub_baa_
 
 define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_aba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfmsub231pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -562,7 +562,7 @@ define <4 x double> @test_x86_fmsub_aba_
 
 define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fmsub_bba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA-NEXT:    vfmsub213pd (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -574,7 +574,7 @@ define <4 x double> @test_x86_fmsub_bba_
 declare <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fnmsub_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_baa_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm1
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfnmsub213ss %xmm1, %xmm1, %xmm0
@@ -585,7 +585,7 @@ define <4 x float> @test_x86_fnmsub_baa_
 
 define <4 x float> @test_x86_fnmsub_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_aba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfnmsub132ss (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -595,7 +595,7 @@ define <4 x float> @test_x86_fnmsub_aba_
 
 define <4 x float> @test_x86_fnmsub_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_bba_ss:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfnmsub213ss (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -606,7 +606,7 @@ define <4 x float> @test_x86_fnmsub_bba_
 declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_baa_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfnmsub132ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -616,7 +616,7 @@ define <4 x float> @test_x86_fnmsub_baa_
 
 define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_aba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA-NEXT:    vfnmsub231ps (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -626,7 +626,7 @@ define <4 x float> @test_x86_fnmsub_aba_
 
 define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_bba_ps:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA-NEXT:    vfnmsub213ps (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -637,7 +637,7 @@ define <4 x float> @test_x86_fnmsub_bba_
 declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_baa_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfnmsub132ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -647,7 +647,7 @@ define <8 x float> @test_x86_fnmsub_baa_
 
 define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_aba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA-NEXT:    vfnmsub231ps (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -657,7 +657,7 @@ define <8 x float> @test_x86_fnmsub_aba_
 
 define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_bba_ps_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA-NEXT:    vfnmsub213ps (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -668,7 +668,7 @@ define <8 x float> @test_x86_fnmsub_bba_
 declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fnmsub_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_baa_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm1
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfnmsub213sd %xmm1, %xmm1, %xmm0
@@ -679,7 +679,7 @@ define <2 x double> @test_x86_fnmsub_baa
 
 define <2 x double> @test_x86_fnmsub_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_aba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfnmsub132sd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -689,7 +689,7 @@ define <2 x double> @test_x86_fnmsub_aba
 
 define <2 x double> @test_x86_fnmsub_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_bba_sd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfnmsub213sd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -700,7 +700,7 @@ define <2 x double> @test_x86_fnmsub_bba
 declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_baa_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfnmsub132pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -710,7 +710,7 @@ define <2 x double> @test_x86_fnmsub_baa
 
 define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_aba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA-NEXT:    vfnmsub231pd (%rdx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -720,7 +720,7 @@ define <2 x double> @test_x86_fnmsub_aba
 
 define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_bba_pd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA-NEXT:    vfnmsub213pd (%rcx), %xmm0, %xmm0
 ; FMA-NEXT:    retq
@@ -731,7 +731,7 @@ define <2 x double> @test_x86_fnmsub_bba
 declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_baa_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfnmsub132pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -741,7 +741,7 @@ define <4 x double> @test_x86_fnmsub_baa
 
 define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_aba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA-NEXT:    vfnmsub231pd (%rdx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
@@ -751,7 +751,7 @@ define <4 x double> @test_x86_fnmsub_aba
 
 define <4 x double> @test_x86_fnmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA-LABEL: test_x86_fnmsub_bba_pd_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA-NEXT:    vfnmsub213pd (%rcx), %ymm0, %ymm0
 ; FMA-NEXT:    retq
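
(Key to the FMA expectations above, an illustrative summary only: the 132/213/231 digit triple names which Intel-order operands (1 = destination) are multiplied and which is added, so

  vfmadd132 : dst = dst  * src3 + src2
  vfmadd213 : dst = src2 * dst  + src3
  vfmadd231 : dst = src2 * src3 + dst

and the AT&T syntax in the CHECK lines lists those operands in reverse order. The baa/aba/bba test names describe which operand positions repeat, which in turn determines the form that lets the repeated operand and the memory operand fold into a single instruction.)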

Modified: llvm/trunk/test/CodeGen/X86/fma-fneg-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-fneg-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-fneg-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-fneg-combine.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define <16 x float> @test1(<16 x float> %a, <16 x float> %b, <16 x float> %c)  {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfmsub213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -23,7 +23,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test2(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfnmsub213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -34,7 +34,7 @@ entry:
 
 define <16 x float> @test3(<16 x float> %a, <16 x float> %b, <16 x float> %c)  {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfmsub213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -45,7 +45,7 @@ entry:
 
 define <16 x float> @test4(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -56,7 +56,7 @@ entry:
 
 define <16 x float> @test5(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfmsub213ps {ru-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -67,7 +67,7 @@ entry:
 
 define <16 x float> @test6(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -79,7 +79,7 @@ entry:
 
 define <8 x float> @test7(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -90,13 +90,13 @@ entry:
 
 define <8 x float> @test8(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
 ; SKX-LABEL: test8:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to8}, %ymm2, %ymm2
 ; SKX-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test8:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-0,-0,-0,-0,-0,-0,-0,-0]
 ; KNL-NEXT:    vxorps %ymm3, %ymm2, %ymm2
 ; KNL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0
@@ -112,7 +112,7 @@ declare <8 x float> @llvm.x86.fma.vfmsub
 
 define <8 x double> @test9(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -125,7 +125,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    vxorpd {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -139,14 +139,14 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 zeroext %mask) local_unnamed_addr #0 {
 ; SKX-LABEL: test11:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to4}, %xmm2, %xmm0
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test11:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vbroadcastss {{.*#+}} xmm0 = [-0,-0,-0,-0]
 ; KNL-NEXT:    vxorps %xmm0, %xmm2, %xmm0
 ; KNL-NEXT:    kmovw %edi, %k1
@@ -162,14 +162,14 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test11b(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 zeroext %mask) local_unnamed_addr #0 {
 ; SKX-LABEL: test11b:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm1 {%k1}
 ; SKX-NEXT:    vmovaps %xmm1, %xmm0
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test11b:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm1 {%k1}
 ; KNL-NEXT:    vmovaps %xmm1, %xmm0
@@ -184,14 +184,14 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <8 x double> @test12(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
 ; SKX-LABEL: test12:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    vxorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test12:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    vpxorq {{.*}}(%rip){1to8}, %zmm0, %zmm0
@@ -204,14 +204,14 @@ entry:
 
 define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
 ; SKX-LABEL: test13:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vxorpd {{.*}}(%rip), %xmm0, %xmm0
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test13:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    vxorpd {{.*}}(%rip), %xmm0, %xmm0
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
@@ -225,14 +225,14 @@ entry:
 
 define <16 x float> @test14(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
 ; SKX-LABEL: test14:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfnmsub132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test14:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfnmsub132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
@@ -245,7 +245,7 @@ entry:
 
 define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask)  {
 ; SKX-LABEL: test15:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm3
 ; SKX-NEXT:    vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
@@ -255,7 +255,7 @@ define <16 x float> @test15(<16 x float>
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test15:
-; KNL:       # BB#0: # %entry
+; KNL:       # %bb.0: # %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm3
 ; KNL-NEXT:    vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
@@ -272,13 +272,13 @@ entry:
 
 define <16 x float> @test16(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
 ; SKX-LABEL: test16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test16:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq
@@ -290,13 +290,13 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <8 x double> @test17(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
 ; SKX-LABEL: test17:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test17:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll Mon Dec  4 09:18:51 2017
@@ -6,17 +6,17 @@
 ; VFMADD
 define <4 x float> @test_x86_fma_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa9,0x00]
@@ -27,19 +27,19 @@ define <4 x float> @test_x86_fma_vfmadd_
 
 define <4 x float> @test_x86_fma_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xa9,0xca]
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa9,0x00]
@@ -51,17 +51,17 @@ declare <4 x float> @llvm.x86.fma.vfmadd
 
 define <2 x double> @test_x86_fma_vfmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa9,0x00]
@@ -72,19 +72,19 @@ define <2 x double> @test_x86_fma_vfmadd
 
 define <2 x double> @test_x86_fma_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa9,0x00]
@@ -96,17 +96,17 @@ declare <2 x double> @llvm.x86.fma.vfmad
 
 define <4 x float> @test_x86_fma_vfmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa8,0x00]
@@ -118,17 +118,17 @@ declare <4 x float> @llvm.x86.fma.vfmadd
 
 define <2 x double> @test_x86_fma_vfmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa8,0x00]
@@ -140,17 +140,17 @@ declare <2 x double> @llvm.x86.fma.vfmad
 
 define <8 x float> @test_x86_fma_vfmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa8,0x00]
@@ -162,17 +162,17 @@ declare <8 x float> @llvm.x86.fma.vfmadd
 
 define <4 x double> @test_x86_fma_vfmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa8,0x00]
@@ -185,17 +185,17 @@ declare <4 x double> @llvm.x86.fma.vfmad
 ; VFMSUB
 define <4 x float> @test_x86_fma_vfmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xab,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xab,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xab,0x00]
@@ -206,19 +206,19 @@ define <4 x float> @test_x86_fma_vfmsub_
 
 define <4 x float> @test_x86_fma_vfmsub_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xab,0xca]
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xab,0x00]
@@ -230,17 +230,17 @@ declare <4 x float> @llvm.x86.fma.vfmsub
 
 define <2 x double> @test_x86_fma_vfmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xab,0x00]
@@ -251,19 +251,19 @@ define <2 x double> @test_x86_fma_vfmsub
 
 define <2 x double> @test_x86_fma_vfmsub_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xab,0xca]
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xab,0x00]
@@ -275,17 +275,17 @@ declare <2 x double> @llvm.x86.fma.vfmsu
 
 define <4 x float> @test_x86_fma_vfmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaa,0x00]
@@ -297,17 +297,17 @@ declare <4 x float> @llvm.x86.fma.vfmsub
 
 define <2 x double> @test_x86_fma_vfmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaa,0x00]
@@ -319,17 +319,17 @@ declare <2 x double> @llvm.x86.fma.vfmsu
 
 define <8 x float> @test_x86_fma_vfmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xaa,0x00]
@@ -341,17 +341,17 @@ declare <8 x float> @llvm.x86.fma.vfmsub
 
 define <4 x double> @test_x86_fma_vfmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xaa,0x00]
@@ -364,17 +364,17 @@ declare <4 x double> @llvm.x86.fma.vfmsu
 ; VFNMADD
 define <4 x float> @test_x86_fma_vfnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xad,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xad,0x00]
@@ -385,19 +385,19 @@ define <4 x float> @test_x86_fma_vfnmadd
 
 define <4 x float> @test_x86_fma_vfnmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xad,0xca]
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xad,0x00]
@@ -409,17 +409,17 @@ declare <4 x float> @llvm.x86.fma.vfnmad
 
 define <2 x double> @test_x86_fma_vfnmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xad,0x00]
@@ -430,19 +430,19 @@ define <2 x double> @test_x86_fma_vfnmad
 
 define <2 x double> @test_x86_fma_vfnmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xad,0xca]
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xad,0x00]
@@ -454,17 +454,17 @@ declare <2 x double> @llvm.x86.fma.vfnma
 
 define <4 x float> @test_x86_fma_vfnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xac,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xac,0x00]
@@ -476,17 +476,17 @@ declare <4 x float> @llvm.x86.fma.vfnmad
 
 define <2 x double> @test_x86_fma_vfnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xac,0x00]
@@ -498,17 +498,17 @@ declare <2 x double> @llvm.x86.fma.vfnma
 
 define <8 x float> @test_x86_fma_vfnmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xac,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xac,0x00]
@@ -520,17 +520,17 @@ declare <8 x float> @llvm.x86.fma.vfnmad
 
 define <4 x double> @test_x86_fma_vfnmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xac,0x00]
@@ -543,17 +543,17 @@ declare <4 x double> @llvm.x86.fma.vfnma
 ; VFNMSUB
 define <4 x float> @test_x86_fma_vfnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaf,0x00]
@@ -564,19 +564,19 @@ define <4 x float> @test_x86_fma_vfnmsub
 
 define <4 x float> @test_x86_fma_vfnmsub_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_ss:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xaf,0xca]
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_ss:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_ss:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaf,0x00]
@@ -588,17 +588,17 @@ declare <4 x float> @llvm.x86.fma.vfnmsu
 
 define <2 x double> @test_x86_fma_vfnmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaf,0x00]
@@ -609,19 +609,19 @@ define <2 x double> @test_x86_fma_vfnmsu
 
 define <2 x double> @test_x86_fma_vfnmsub_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_sd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_sd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_sd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaf,0x00]
@@ -633,17 +633,17 @@ declare <2 x double> @llvm.x86.fma.vfnms
 
 define <4 x float> @test_x86_fma_vfnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xae,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xae,0x00]
@@ -655,17 +655,17 @@ declare <4 x float> @llvm.x86.fma.vfnmsu
 
 define <2 x double> @test_x86_fma_vfnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xae,0x00]
@@ -677,17 +677,17 @@ declare <2 x double> @llvm.x86.fma.vfnms
 
 define <8 x float> @test_x86_fma_vfnmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xae,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xae,0x00]
@@ -699,17 +699,17 @@ declare <8 x float> @llvm.x86.fma.vfnmsu
 
 define <4 x double> @test_x86_fma_vfnmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xae,0x00]
@@ -722,17 +722,17 @@ declare <4 x double> @llvm.x86.fma.vfnms
 ; VFMADDSUB
 define <4 x float> @test_x86_fma_vfmaddsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa6,0x00]
@@ -744,17 +744,17 @@ declare <4 x float> @llvm.x86.fma.vfmadd
 
 define <2 x double> @test_x86_fma_vfmaddsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa6,0x00]
@@ -766,17 +766,17 @@ declare <2 x double> @llvm.x86.fma.vfmad
 
 define <8 x float> @test_x86_fma_vfmaddsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa6,0x00]
@@ -788,17 +788,17 @@ declare <8 x float> @llvm.x86.fma.vfmadd
 
 define <4 x double> @test_x86_fma_vfmaddsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa6,0x00]
@@ -811,17 +811,17 @@ declare <4 x double> @llvm.x86.fma.vfmad
 ; VFMSUBADD
 define <4 x float> @test_x86_fma_vfmsubadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa7,0x00]
@@ -833,17 +833,17 @@ declare <4 x float> @llvm.x86.fma.vfmsub
 
 define <2 x double> @test_x86_fma_vfmsubadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa7,0x00]
@@ -855,17 +855,17 @@ declare <2 x double> @llvm.x86.fma.vfmsu
 
 define <8 x float> @test_x86_fma_vfmsubadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa7,0x00]
@@ -877,17 +877,17 @@ declare <8 x float> @llvm.x86.fma.vfmsub
 
 define <4 x double> @test_x86_fma_vfmsubadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK-FMA:       # BB#0:
+; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK-AVX512VL:       # BB#0:
+; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK-FMA-WIN:       # BB#0:
+; CHECK-FMA-WIN:       # %bb.0:
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa7,0x00]

Modified: llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll Mon Dec  4 09:18:51 2017
@@ -16,7 +16,7 @@ declare <2 x double> @llvm.x86.fma.vfnms
 
 define void @fmadd_aab_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fmadd_aab_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfmadd213ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -42,7 +42,7 @@ define void @fmadd_aab_ss(float* %a, flo
 
 define void @fmadd_aba_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fmadd_aba_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfmadd132ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -68,7 +68,7 @@ define void @fmadd_aba_ss(float* %a, flo
 
 define void @fmsub_aab_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fmsub_aab_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfmsub213ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -94,7 +94,7 @@ define void @fmsub_aab_ss(float* %a, flo
 
 define void @fmsub_aba_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fmsub_aba_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfmsub132ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -120,7 +120,7 @@ define void @fmsub_aba_ss(float* %a, flo
 
 define void @fnmadd_aab_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fnmadd_aab_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfnmadd213ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -146,7 +146,7 @@ define void @fnmadd_aab_ss(float* %a, fl
 
 define void @fnmadd_aba_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fnmadd_aba_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfnmadd132ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -172,7 +172,7 @@ define void @fnmadd_aba_ss(float* %a, fl
 
 define void @fnmsub_aab_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fnmsub_aab_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfnmsub213ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -198,7 +198,7 @@ define void @fnmsub_aab_ss(float* %a, fl
 
 define void @fnmsub_aba_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fnmsub_aba_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfnmsub132ss (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -224,7 +224,7 @@ define void @fnmsub_aba_ss(float* %a, fl
 
 define void @fmadd_aab_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fmadd_aab_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfmadd213sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -246,7 +246,7 @@ define void @fmadd_aab_sd(double* %a, do
 
 define void @fmadd_aba_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fmadd_aba_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfmadd132sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -268,7 +268,7 @@ define void @fmadd_aba_sd(double* %a, do
 
 define void @fmsub_aab_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fmsub_aab_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfmsub213sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -290,7 +290,7 @@ define void @fmsub_aab_sd(double* %a, do
 
 define void @fmsub_aba_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fmsub_aba_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfmsub132sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -312,7 +312,7 @@ define void @fmsub_aba_sd(double* %a, do
 
 define void @fnmadd_aab_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fnmadd_aab_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfnmadd213sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -334,7 +334,7 @@ define void @fnmadd_aab_sd(double* %a, d
 
 define void @fnmadd_aba_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fnmadd_aba_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfnmadd132sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -356,7 +356,7 @@ define void @fnmadd_aba_sd(double* %a, d
 
 define void @fnmsub_aab_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fnmsub_aab_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfnmsub213sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -378,7 +378,7 @@ define void @fnmsub_aab_sd(double* %a, d
 
 define void @fnmsub_aba_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fnmsub_aba_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfnmsub132sd (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)

Modified: llvm/trunk/test/CodeGen/X86/fma-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-schedule.ll Mon Dec  4 09:18:51 2017
@@ -17,43 +17,43 @@
 
 define <2 x double> @test_vfmadd213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmadd213pd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmadd213pd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmadd213pd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmadd213pd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmadd213pd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmadd213pd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmadd213pd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -65,43 +65,43 @@ define <2 x double> @test_vfmadd213pd(<2
 
 define <4 x double> @test_vfmadd213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmadd213pd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmadd213pd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmadd213pd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmadd213pd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmadd213pd_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmadd213pd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmadd213pd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -113,43 +113,43 @@ define <4 x double> @test_vfmadd213pd_ym
 
 define <4 x float> @test_vfmadd213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmadd213ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmadd213ps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmadd213ps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmadd213ps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmadd213ps:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmadd213ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmadd213ps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -161,43 +161,43 @@ define <4 x float> @test_vfmadd213ps(<4
 
 define <8 x float> @test_vfmadd213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmadd213ps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmadd213ps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmadd213ps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmadd213ps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmadd213ps_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmadd213ps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmadd213ps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -209,43 +209,43 @@ define <8 x float> @test_vfmadd213ps_ymm
 
 define <2 x double> @test_vfmadd213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmadd213sd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmadd213sd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmadd213sd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmadd213sd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmadd213sd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmadd213sd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmadd213sd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -257,43 +257,43 @@ define <2 x double> @test_vfmadd213sd(<2
 
 define <4 x float> @test_vfmadd213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmadd213ss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmadd213ss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmadd213ss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmadd213ss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmadd213ss:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmadd213ss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmadd213ss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -317,43 +317,43 @@ define <4 x float> @test_vfmadd213ss(<4
 
 define <2 x double> @test_vfmaddsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmaddsubpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsubpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsubpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsubpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsubpd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsubpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -365,43 +365,43 @@ define <2 x double> @test_vfmaddsubpd(<2
 
 define <4 x double> @test_vfmaddsubpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a4, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmaddsubpd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsubpd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsubpd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsubpd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsubpd_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubpd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsubpd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -413,43 +413,43 @@ define <4 x double> @test_vfmaddsubpd_ym
 
 define <4 x float> @test_vfmaddsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a4, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmaddsubps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsubps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsubps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsubps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsubps:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsubps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -461,43 +461,43 @@ define <4 x float> @test_vfmaddsubps(<4
 
 define <8 x float> @test_vfmaddsubps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a8, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmaddsubps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsubps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsubps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsubps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsubps_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsubps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -521,43 +521,43 @@ define <8 x float> @test_vfmaddsubps_ymm
 
 define <2 x double> @test_vfmsubaddpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmsubaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubaddpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubaddpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubaddpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubaddpd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubaddpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -569,43 +569,43 @@ define <2 x double> @test_vfmsubaddpd(<2
 
 define <4 x double> @test_vfmsubaddpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a4, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmsubaddpd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubaddpd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubaddpd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubaddpd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubaddpd_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddpd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubaddpd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -617,43 +617,43 @@ define <4 x double> @test_vfmsubaddpd_ym
 
 define <4 x float> @test_vfmsubaddps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a4, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmsubaddps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubaddps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubaddps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubaddps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubaddps:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubaddps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -665,43 +665,43 @@ define <4 x float> @test_vfmsubaddps(<4
 
 define <8 x float> @test_vfmsubaddps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a8, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmsubaddps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubaddps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubaddps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubaddps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubaddps_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubaddps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -725,43 +725,43 @@ define <8 x float> @test_vfmsubaddps_ymm
 
 define <2 x double> @test_vfmsub213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmsub213pd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsub213pd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsub213pd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsub213pd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsub213pd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsub213pd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsub213pd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -773,43 +773,43 @@ define <2 x double> @test_vfmsub213pd(<2
 
 define <4 x double> @test_vfmsub213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmsub213pd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsub213pd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsub213pd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsub213pd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsub213pd_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsub213pd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsub213pd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -821,43 +821,43 @@ define <4 x double> @test_vfmsub213pd_ym
 
 define <4 x float> @test_vfmsub213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmsub213ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsub213ps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsub213ps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsub213ps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsub213ps:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsub213ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsub213ps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -869,43 +869,43 @@ define <4 x float> @test_vfmsub213ps(<4
 
 define <8 x float> @test_vfmsub213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmsub213ps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsub213ps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsub213ps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsub213ps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsub213ps_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsub213ps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsub213ps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -917,43 +917,43 @@ define <8 x float> @test_vfmsub213ps_ymm
 
 define <2 x double> @test_vfmsub213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfmsub213sd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsub213sd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsub213sd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsub213sd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsub213sd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsub213sd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsub213sd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -965,43 +965,43 @@ define <2 x double> @test_vfmsub213sd(<2
 
 define <4 x float> @test_vfmsub213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfmsub213ss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsub213ss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsub213ss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsub213ss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsub213ss:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfmsub213ss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsub213ss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1025,43 +1025,43 @@ define <4 x float> @test_vfmsub213ss(<4
 
 define <2 x double> @test_vfnmadd213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfnmadd213pd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmadd213pd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmadd213pd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmadd213pd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmadd213pd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmadd213pd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmadd213pd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1073,43 +1073,43 @@ define <2 x double> @test_vfnmadd213pd(<
 
 define <4 x double> @test_vfnmadd213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_vfnmadd213pd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmadd213pd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmadd213pd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmadd213pd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmadd213pd_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmadd213pd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmadd213pd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1121,43 +1121,43 @@ define <4 x double> @test_vfnmadd213pd_y
 
 define <4 x float> @test_vfnmadd213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfnmadd213ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmadd213ps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmadd213ps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmadd213ps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmadd213ps:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmadd213ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmadd213ps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1169,43 +1169,43 @@ define <4 x float> @test_vfnmadd213ps(<4
 
 define <8 x float> @test_vfnmadd213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_vfnmadd213ps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmadd213ps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmadd213ps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmadd213ps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmadd213ps_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmadd213ps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmadd213ps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1217,43 +1217,43 @@ define <8 x float> @test_vfnmadd213ps_ym
 
 define <2 x double> @test_vfnmadd213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfnmadd213sd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmadd213sd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmadd213sd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmadd213sd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmadd213sd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmadd213sd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmadd213sd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1265,43 +1265,43 @@ define <2 x double> @test_vfnmadd213sd(<
 
 define <4 x float> @test_vfnmadd213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfnmadd213ss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmadd213ss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmadd213ss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmadd213ss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmadd213ss:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmadd213ss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmadd213ss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1325,43 +1325,43 @@ define <4 x float> @test_vfnmadd213ss(<4
 
 define <2 x double> @test_vfnmsub213pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfnmsub213pd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsub213pd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsub213pd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsub213pd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsub213pd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmsub213pd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsub213pd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1373,43 +1373,43 @@ define <2 x double> @test_vfnmsub213pd(<
 
 define <4 x double> @test_vfnmsub213pd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_vfnmsub213pd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsub213pd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsub213pd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsub213pd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsub213pd_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmsub213pd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsub213pd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1421,43 +1421,43 @@ define <4 x double> @test_vfnmsub213pd_y
 
 define <4 x float> @test_vfnmsub213ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfnmsub213ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsub213ps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsub213ps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsub213ps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsub213ps:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmsub213ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsub213ps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1469,43 +1469,43 @@ define <4 x float> @test_vfnmsub213ps(<4
 
 define <8 x float> @test_vfnmsub213ps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_vfnmsub213ps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0
 ; GENERIC-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsub213ps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsub213ps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsub213ps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsub213ps_ymm:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmsub213ps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsub213ps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0
 ; ZNVER1-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1517,43 +1517,43 @@ define <8 x float> @test_vfnmsub213ps_ym
 
 define <2 x double> @test_vfnmsub213sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) {
 ; GENERIC-LABEL: test_vfnmsub213sd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsub213sd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsub213sd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsub213sd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsub213sd:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmsub213sd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsub213sd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1565,43 +1565,43 @@ define <2 x double> @test_vfnmsub213sd(<
 
 define <4 x float> @test_vfnmsub213ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) {
 ; GENERIC-LABEL: test_vfnmsub213ss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0
 ; GENERIC-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsub213ss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsub213ss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsub213ss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsub213ss:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [2:1.00]
 ;
 ; SKX-LABEL: test_vfnmsub213ss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsub213ss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0
 ; ZNVER1-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
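
Aside from the block-label rename, the scheduling annotations in the hunks above are untouched; they read as [latency : reciprocal throughput]. Taking one check line from the vfnmsub213ss test as a worked example (the annotation is from the hunk above; the gloss is mine):

  ; SKX-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
  ;   latency: 9 cycles from operands ready to result (load + FMA)
  ;   reciprocal throughput: 0.50, i.e. up to two independent
  ;   vfnmsub213ss ops can start per cycle on SKX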

Modified: llvm/trunk/test/CodeGen/X86/fma.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define float @test_f32(float %a, float %b, float %c) #0 {
 ; FMA32-LABEL: test_f32:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    pushl %eax ## encoding: [0x50]
 ; FMA32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
 ; FMA32-NEXT:    ## xmm0 = mem[0],zero,zero,zero
@@ -23,29 +23,29 @@ define float @test_f32(float %a, float %
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMACALL32-LABEL: test_f32:
-; FMACALL32:       ## BB#0: ## %entry
+; FMACALL32:       ## %bb.0: ## %entry
 ; FMACALL32-NEXT:    jmp _fmaf ## TAILCALL
 ; FMACALL32-NEXT:    ## encoding: [0xeb,A]
 ; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
 ;
 ; FMA64-LABEL: test_f32:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; FMACALL64-LABEL: test_f32:
-; FMACALL64:       ## BB#0: ## %entry
+; FMACALL64:       ## %bb.0: ## %entry
 ; FMACALL64-NEXT:    jmp _fmaf ## TAILCALL
 ; FMACALL64-NEXT:    ## encoding: [0xeb,A]
 ; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
 ;
 ; AVX512-LABEL: test_f32:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_f32:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -55,7 +55,7 @@ entry:
 
 define double @test_f64(double %a, double %b, double %c) #0 {
 ; FMA32-LABEL: test_f64:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    subl $12, %esp ## encoding: [0x83,0xec,0x0c]
 ; FMA32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x10]
 ; FMA32-NEXT:    ## xmm0 = mem[0],zero
@@ -68,29 +68,29 @@ define double @test_f64(double %a, doubl
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMACALL32-LABEL: test_f64:
-; FMACALL32:       ## BB#0: ## %entry
+; FMACALL32:       ## %bb.0: ## %entry
 ; FMACALL32-NEXT:    jmp _fma ## TAILCALL
 ; FMACALL32-NEXT:    ## encoding: [0xeb,A]
 ; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
 ;
 ; FMA64-LABEL: test_f64:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; FMACALL64-LABEL: test_f64:
-; FMACALL64:       ## BB#0: ## %entry
+; FMACALL64:       ## %bb.0: ## %entry
 ; FMACALL64-NEXT:    jmp _fma ## TAILCALL
 ; FMACALL64-NEXT:    ## encoding: [0xeb,A]
 ; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
 ;
 ; AVX512-LABEL: test_f64:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_f64:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -100,7 +100,7 @@ entry:
 
 define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
 ; FMA32-LABEL: test_f80:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    subl $60, %esp ## encoding: [0x83,0xec,0x3c]
 ; FMA32-NEXT:    fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x40]
 ; FMA32-NEXT:    fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -114,7 +114,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMACALL32-LABEL: test_f80:
-; FMACALL32:       ## BB#0: ## %entry
+; FMACALL32:       ## %bb.0: ## %entry
 ; FMACALL32-NEXT:    subl $60, %esp ## encoding: [0x83,0xec,0x3c]
 ; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x40]
 ; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -128,7 +128,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x
 ; FMACALL32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_f80:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
 ; FMA64-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
 ; FMA64-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -142,7 +142,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; FMACALL64-LABEL: test_f80:
-; FMACALL64:       ## BB#0: ## %entry
+; FMACALL64:       ## %bb.0: ## %entry
 ; FMACALL64-NEXT:    subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
 ; FMACALL64-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
 ; FMACALL64-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -156,7 +156,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x
 ; FMACALL64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_f80:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
 ; AVX512-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
 ; AVX512-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -170,7 +170,7 @@ define x86_fp80 @test_f80(x86_fp80 %a, x
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_f80:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
 ; AVX512VL-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
 ; AVX512VL-NEXT:    fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
@@ -189,40 +189,40 @@ entry:
 
 define float @test_f32_cst() #0 {
 ; FMA32-LABEL: test_f32_cst:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    flds LCPI3_0 ## encoding: [0xd9,0x05,A,A,A,A]
 ; FMA32-NEXT:    ## fixup A - offset: 2, value: LCPI3_0, kind: FK_Data_4
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMACALL32-LABEL: test_f32_cst:
-; FMACALL32:       ## BB#0: ## %entry
+; FMACALL32:       ## %bb.0: ## %entry
 ; FMACALL32-NEXT:    flds LCPI3_0 ## encoding: [0xd9,0x05,A,A,A,A]
 ; FMACALL32-NEXT:    ## fixup A - offset: 2, value: LCPI3_0, kind: FK_Data_4
 ; FMACALL32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_f32_cst:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vmovss {{.*}}(%rip), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
 ; FMA64-NEXT:    ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
 ; FMA64-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; FMACALL64-LABEL: test_f32_cst:
-; FMACALL64:       ## BB#0: ## %entry
+; FMACALL64:       ## %bb.0: ## %entry
 ; FMACALL64-NEXT:    movss {{.*}}(%rip), %xmm0 ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
 ; FMACALL64-NEXT:    ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
 ; FMACALL64-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; FMACALL64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_f32_cst:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
 ; AVX512-NEXT:    ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
 ; AVX512-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_f32_cst:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
 ; AVX512VL-NEXT:    ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
 ; AVX512VL-NEXT:    ## xmm0 = mem[0],zero,zero,zero
@@ -234,22 +234,22 @@ entry:
 
 define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 {
 ; FMA32-LABEL: test_v4f32:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_v4f32:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_v4f32:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v4f32:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -259,22 +259,22 @@ entry:
 
 define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #0 {
 ; FMA32-LABEL: test_v8f32:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_v8f32:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_v8f32:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v8f32:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -284,7 +284,7 @@ entry:
 
 define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c) #0 {
 ; FMA32-LABEL: test_v16f32:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    pushl %ebp ## encoding: [0x55]
 ; FMA32-NEXT:    movl %esp, %ebp ## encoding: [0x89,0xe5]
 ; FMA32-NEXT:    andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
@@ -296,18 +296,18 @@ define <16 x float> @test_v16f32(<16 x f
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_v16f32:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ps %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0xc4]
 ; FMA64-NEXT:    vfmadd213ps %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0xcd]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_v16f32:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v16f32:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -317,22 +317,22 @@ entry:
 
 define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #0 {
 ; FMA32-LABEL: test_v2f64:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_v2f64:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_v2f64:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v2f64:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -342,22 +342,22 @@ entry:
 
 define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 {
 ; FMA32-LABEL: test_v4f64:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_v4f64:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_v4f64:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v4f64:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
@@ -367,7 +367,7 @@ entry:
 
 define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c) #0 {
 ; FMA32-LABEL: test_v8f64:
-; FMA32:       ## BB#0: ## %entry
+; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    pushl %ebp ## encoding: [0x55]
 ; FMA32-NEXT:    movl %esp, %ebp ## encoding: [0x89,0xe5]
 ; FMA32-NEXT:    andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
@@ -379,18 +379,18 @@ define <8 x double> @test_v8f64(<8 x dou
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
 ; FMA64-LABEL: test_v8f64:
-; FMA64:       ## BB#0: ## %entry
+; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213pd %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0xc4]
 ; FMA64-NEXT:    vfmadd213pd %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0xcd]
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_v8f64:
-; AVX512:       ## BB#0: ## %entry
+; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v8f64:
-; AVX512VL:       ## BB#0: ## %entry
+; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
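
The substitution is the same in tests that also check encodings: only the basic-block comment changes, so a regenerated test differs from the old one in exactly the "# %bb.0:" lines. A minimal sketch of a freshly generated test under the new format (hypothetical file, not part of this commit):

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

  define float @fadd_f32(float %a, float %b) {
  ; CHECK-LABEL: fadd_f32:
  ; CHECK:       # %bb.0:
  ; CHECK-NEXT:    addss %xmm1, %xmm0
  ; CHECK-NEXT:    retq
    %r = fadd float %a, %b
    ret float %r
  }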

Modified: llvm/trunk/test/CodeGen/X86/fma4-commute-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-commute-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-commute-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-commute-x86.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ attributes #0 = { nounwind }
 declare <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_baa_ss:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddss %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -16,7 +16,7 @@ define <4 x float> @test_x86_fmadd_baa_s
 
 define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_aba_ss:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddss %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -26,7 +26,7 @@ define <4 x float> @test_x86_fmadd_aba_s
 
 define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_bba_ss:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA4-NEXT:    vfmaddss (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -37,7 +37,7 @@ define <4 x float> @test_x86_fmadd_bba_s
 declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_baa_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -47,7 +47,7 @@ define <4 x float> @test_x86_fmadd_baa_p
 
 define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_aba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -57,7 +57,7 @@ define <4 x float> @test_x86_fmadd_aba_p
 
 define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_bba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA4-NEXT:    vfmaddps (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -68,7 +68,7 @@ define <4 x float> @test_x86_fmadd_bba_p
 declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_baa_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfmaddps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -78,7 +78,7 @@ define <8 x float> @test_x86_fmadd_baa_p
 
 define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_aba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfmaddps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -88,7 +88,7 @@ define <8 x float> @test_x86_fmadd_aba_p
 
 define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_bba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA4-NEXT:    vfmaddps (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -99,7 +99,7 @@ define <8 x float> @test_x86_fmadd_bba_p
 declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_baa_sd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddsd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -109,7 +109,7 @@ define <2 x double> @test_x86_fmadd_baa_
 
 define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_aba_sd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddsd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -119,7 +119,7 @@ define <2 x double> @test_x86_fmadd_aba_
 
 define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_bba_sd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA4-NEXT:    vfmaddsd (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -130,7 +130,7 @@ define <2 x double> @test_x86_fmadd_bba_
 declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_baa_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -140,7 +140,7 @@ define <2 x double> @test_x86_fmadd_baa_
 
 define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_aba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfmaddpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -150,7 +150,7 @@ define <2 x double> @test_x86_fmadd_aba_
 
 define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_bba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA4-NEXT:    vfmaddpd (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -161,7 +161,7 @@ define <2 x double> @test_x86_fmadd_bba_
 declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_baa_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfmaddpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -171,7 +171,7 @@ define <4 x double> @test_x86_fmadd_baa_
 
 define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_aba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfmaddpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -181,7 +181,7 @@ define <4 x double> @test_x86_fmadd_aba_
 
 define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmadd_bba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA4-NEXT:    vfmaddpd (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -192,7 +192,7 @@ define <4 x double> @test_x86_fmadd_bba_
 declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_baa_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmaddps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -202,7 +202,7 @@ define <4 x float> @test_x86_fnmadd_baa_
 
 define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_aba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmaddps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -212,7 +212,7 @@ define <4 x float> @test_x86_fnmadd_aba_
 
 define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_bba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA4-NEXT:    vfnmaddps (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -223,7 +223,7 @@ define <4 x float> @test_x86_fnmadd_bba_
 declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_baa_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmaddps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -233,7 +233,7 @@ define <8 x float> @test_x86_fnmadd_baa_
 
 define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_aba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmaddps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -243,7 +243,7 @@ define <8 x float> @test_x86_fnmadd_aba_
 
 define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_bba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA4-NEXT:    vfnmaddps (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -254,7 +254,7 @@ define <8 x float> @test_x86_fnmadd_bba_
 declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_baa_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmaddpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -264,7 +264,7 @@ define <2 x double> @test_x86_fnmadd_baa
 
 define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_aba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmaddpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -274,7 +274,7 @@ define <2 x double> @test_x86_fnmadd_aba
 
 define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_bba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA4-NEXT:    vfnmaddpd (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -285,7 +285,7 @@ define <2 x double> @test_x86_fnmadd_bba
 declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_baa_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmaddpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -295,7 +295,7 @@ define <4 x double> @test_x86_fnmadd_baa
 
 define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_aba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmaddpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -305,7 +305,7 @@ define <4 x double> @test_x86_fnmadd_aba
 
 define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmadd_bba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA4-NEXT:    vfnmaddpd (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -316,7 +316,7 @@ define <4 x double> @test_x86_fnmadd_bba
 declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_baa_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfmsubps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -326,7 +326,7 @@ define <4 x float> @test_x86_fmsub_baa_p
 
 define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_aba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfmsubps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -336,7 +336,7 @@ define <4 x float> @test_x86_fmsub_aba_p
 
 define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_bba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA4-NEXT:    vfmsubps (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -347,7 +347,7 @@ define <4 x float> @test_x86_fmsub_bba_p
 declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_baa_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfmsubps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -357,7 +357,7 @@ define <8 x float> @test_x86_fmsub_baa_p
 
 define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_aba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfmsubps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -367,7 +367,7 @@ define <8 x float> @test_x86_fmsub_aba_p
 
 define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_bba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA4-NEXT:    vfmsubps (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -378,7 +378,7 @@ define <8 x float> @test_x86_fmsub_bba_p
 declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_baa_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfmsubpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -388,7 +388,7 @@ define <2 x double> @test_x86_fmsub_baa_
 
 define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_aba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfmsubpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -398,7 +398,7 @@ define <2 x double> @test_x86_fmsub_aba_
 
 define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_bba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA4-NEXT:    vfmsubpd (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -409,7 +409,7 @@ define <2 x double> @test_x86_fmsub_bba_
 declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_baa_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfmsubpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -419,7 +419,7 @@ define <4 x double> @test_x86_fmsub_baa_
 
 define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_aba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfmsubpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -429,7 +429,7 @@ define <4 x double> @test_x86_fmsub_aba_
 
 define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fmsub_bba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA4-NEXT:    vfmsubpd (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -440,7 +440,7 @@ define <4 x double> @test_x86_fmsub_bba_
 declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_baa_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmsubps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -450,7 +450,7 @@ define <4 x float> @test_x86_fnmsub_baa_
 
 define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_aba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmsubps %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -460,7 +460,7 @@ define <4 x float> @test_x86_fnmsub_aba_
 
 define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_bba_ps:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %xmm0
 ; FMA4-NEXT:    vfnmsubps (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -471,7 +471,7 @@ define <4 x float> @test_x86_fnmsub_bba_
 declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
 define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_baa_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmsubps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -481,7 +481,7 @@ define <8 x float> @test_x86_fnmsub_baa_
 
 define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_aba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmsubps %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -491,7 +491,7 @@ define <8 x float> @test_x86_fnmsub_aba_
 
 define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_bba_ps_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovaps (%rdx), %ymm0
 ; FMA4-NEXT:    vfnmsubps (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -502,7 +502,7 @@ define <8 x float> @test_x86_fnmsub_bba_
 declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_baa_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmsubpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -512,7 +512,7 @@ define <2 x double> @test_x86_fnmsub_baa
 
 define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_aba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %xmm0
 ; FMA4-NEXT:    vfnmsubpd %xmm0, (%rdx), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -522,7 +522,7 @@ define <2 x double> @test_x86_fnmsub_aba
 
 define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_bba_pd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %xmm0
 ; FMA4-NEXT:    vfnmsubpd (%rcx), %xmm0, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
@@ -533,7 +533,7 @@ define <2 x double> @test_x86_fnmsub_bba
 declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
 define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_baa_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmsubpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -543,7 +543,7 @@ define <4 x double> @test_x86_fnmsub_baa
 
 define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_aba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rcx), %ymm0
 ; FMA4-NEXT:    vfnmsubpd %ymm0, (%rdx), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
@@ -553,7 +553,7 @@ define <4 x double> @test_x86_fnmsub_aba
 
 define <4 x double> @test_x86_fnmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
 ; FMA4-LABEL: test_x86_fnmsub_bba_pd_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmovapd (%rdx), %ymm0
 ; FMA4-NEXT:    vfnmsubpd (%rcx), %ymm0, %ymm0, %ymm0
 ; FMA4-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ declare <2 x double> @llvm.x86.fma4.vfma
 ; TODO this can be negated
 define <4 x float> @test1(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vxorps {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -19,7 +19,7 @@ define <4 x float> @test1(<4 x float> %a
 
 define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
@@ -29,7 +29,7 @@ define <4 x float> @test2(<4 x float> %a
 
 define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
@@ -39,7 +39,7 @@ define <4 x float> @test3(<4 x float> %a
 
 define <4 x float> @test4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -49,7 +49,7 @@ define <4 x float> @test4(<4 x float> %a
 
 define <4 x float> @test5(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -60,7 +60,7 @@ define <4 x float> @test5(<4 x float> %a
 
 define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vxorpd {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -71,7 +71,7 @@ define <2 x double> @test6(<2 x double>
 
 define <2 x double> @test7(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
@@ -81,7 +81,7 @@ define <2 x double> @test7(<2 x double>
 
 define <2 x double> @test8(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %b
@@ -91,7 +91,7 @@ define <2 x double> @test8(<2 x double>
 
 define <2 x double> @test9(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
@@ -101,7 +101,7 @@ define <2 x double> @test9(<2 x double>
 
 define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
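
The fneg-combine cases above follow one rule worth spelling out: an fsub from -0.0 is treated as a negation, and the combiner folds it into the FMA4 opcode that carries the sign instead of emitting a separate xor. From the hunks above: negating the addend c turns vfmaddss into vfmsubss (test2), negating either multiplicand gives vfnmaddss (test3, test4), and negating both a multiplicand and the addend gives vfnmsubss (test5). Test1 negates the whole result, which is not yet folded (hence the TODO above it) and still costs a vxorps. Restating test2 as a sketch:

  %neg.c = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %c
  %res   = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %neg.c)
  ; folds to a single instruction: vfmsubss %xmm2, %xmm1, %xmm0, %xmm0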

Modified: llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; VFMADD
 define <4 x float> @test_x86_fma4_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6a,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -14,7 +14,7 @@ define <4 x float> @test_x86_fma4_vfmadd
 
 define <4 x float> @test_x86_fma4_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_bac_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddss %xmm2, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0xf1,0x6a,0xc2,0x00]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
@@ -24,7 +24,7 @@ declare <4 x float> @llvm.x86.fma4.vfmad
 
 define <2 x double> @test_x86_fma4_vfmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6b,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -33,7 +33,7 @@ define <2 x double> @test_x86_fma4_vfmad
 
 define <2 x double> @test_x86_fma4_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_bac_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsd %xmm2, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0xf1,0x6b,0xc2,0x00]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
@@ -43,7 +43,7 @@ declare <2 x double> @llvm.x86.fma4.vfma
 
 define <4 x float> @test_x86_fma_vfmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmadd_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x68,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -53,7 +53,7 @@ declare <4 x float> @llvm.x86.fma.vfmadd
 
 define <2 x double> @test_x86_fma_vfmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmadd_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x69,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -63,7 +63,7 @@ declare <2 x double> @llvm.x86.fma.vfmad
 
 define <8 x float> @test_x86_fma_vfmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmadd_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x68,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -73,7 +73,7 @@ declare <8 x float> @llvm.x86.fma.vfmadd
 
 define <4 x double> @test_x86_fma_vfmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmadd_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x69,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -84,7 +84,7 @@ declare <4 x double> @llvm.x86.fma.vfmad
 ; VFMSUB
 define <4 x float> @test_x86_fma_vfmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6c,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -94,7 +94,7 @@ declare <4 x float> @llvm.x86.fma.vfmsub
 
 define <2 x double> @test_x86_fma_vfmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6d,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -104,7 +104,7 @@ declare <2 x double> @llvm.x86.fma.vfmsu
 
 define <8 x float> @test_x86_fma_vfmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x6c,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -114,7 +114,7 @@ declare <8 x float> @llvm.x86.fma.vfmsub
 
 define <4 x double> @test_x86_fma_vfmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x6d,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -125,7 +125,7 @@ declare <4 x double> @llvm.x86.fma.vfmsu
 ; VFNMADD
 define <4 x float> @test_x86_fma_vfnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x78,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -135,7 +135,7 @@ declare <4 x float> @llvm.x86.fma.vfnmad
 
 define <2 x double> @test_x86_fma_vfnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x79,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -145,7 +145,7 @@ declare <2 x double> @llvm.x86.fma.vfnma
 
 define <8 x float> @test_x86_fma_vfnmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x78,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -155,7 +155,7 @@ declare <8 x float> @llvm.x86.fma.vfnmad
 
 define <4 x double> @test_x86_fma_vfnmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x79,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -166,7 +166,7 @@ declare <4 x double> @llvm.x86.fma.vfnma
 ; VFNMSUB
 define <4 x float> @test_x86_fma_vfnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x7c,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -176,7 +176,7 @@ declare <4 x float> @llvm.x86.fma.vfnmsu
 
 define <2 x double> @test_x86_fma_vfnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x7d,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -186,7 +186,7 @@ declare <2 x double> @llvm.x86.fma.vfnms
 
 define <8 x float> @test_x86_fma_vfnmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x7c,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -196,7 +196,7 @@ declare <8 x float> @llvm.x86.fma.vfnmsu
 
 define <4 x double> @test_x86_fma_vfnmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x7d,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -207,7 +207,7 @@ declare <4 x double> @llvm.x86.fma.vfnms
 ; VFMADDSUB
 define <4 x float> @test_x86_fma_vfmaddsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5c,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -217,7 +217,7 @@ declare <4 x float> @llvm.x86.fma.vfmadd
 
 define <2 x double> @test_x86_fma_vfmaddsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5d,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -227,7 +227,7 @@ declare <2 x double> @llvm.x86.fma.vfmad
 
 define <8 x float> @test_x86_fma_vfmaddsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5c,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -237,7 +237,7 @@ declare <8 x float> @llvm.x86.fma.vfmadd
 
 define <4 x double> @test_x86_fma_vfmaddsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5d,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -248,7 +248,7 @@ declare <4 x double> @llvm.x86.fma.vfmad
 ; VFMSUBADD
 define <4 x float> @test_x86_fma_vfmsubadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5e,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -258,7 +258,7 @@ declare <4 x float> @llvm.x86.fma.vfmsub
 
 define <2 x double> @test_x86_fma_vfmsubadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5f,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -268,7 +268,7 @@ declare <2 x double> @llvm.x86.fma.vfmsu
 
 define <8 x float> @test_x86_fma_vfmsubadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5e,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -278,7 +278,7 @@ declare <8 x float> @llvm.x86.fma.vfmsub
 
 define <4 x double> @test_x86_fma_vfmsubadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmsubaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5f,0xc2,0x10]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmsubadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)

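Every hunk above is the same mechanical rewrite: the basic-block comment emitted into the assembly changes from "# BB#N" to "# %bb.N", and the autogenerated CHECK lines are refreshed to match. As a rough illustration only (not part of this patch; tests with autogenerated assertions are normally refreshed by rerunning utils/update_llc_test_checks.py), a one-off migration of such FileCheck expectations could be scripted as below, where the directory and pattern are assumptions made for the sketch:

    # Sketch: rewrite old-style "# BB#N" block comments to the new
    # "# %bb.N" spelling across a test directory. Illustrative only.
    import pathlib
    import re

    OLD_BLOCK_REF = re.compile(r"# BB#(\d+)")

    for path in pathlib.Path("test/CodeGen/X86").rglob("*.ll"):
        text = path.read_text()
        updated = OLD_BLOCK_REF.sub(r"# %bb.\1", text)
        if updated != text:
            path.write_text(updated)
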
Modified: llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; VFMADD
 define < 4 x float > @test_x86_fma4_vfmadd_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %a2) {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_ss_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddss (%rdi), %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load float , float *%a2
@@ -15,7 +15,7 @@ define < 4 x float > @test_x86_fma4_vfma
 }
 define < 4 x float > @test_x86_fma4_vfmadd_ss_load2(< 4 x float > %a0, float* %a1, < 4 x float > %a2) {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_ss_load2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddss %xmm1, (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load float , float *%a1
@@ -28,7 +28,7 @@ declare < 4 x float > @llvm.x86.fma4.vfm
 
 define < 2 x double > @test_x86_fma4_vfmadd_sd_load(< 2 x double > %a0, < 2 x double > %a1, double* %a2) {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_sd_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsd (%rdi), %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load double , double *%a2
@@ -38,7 +38,7 @@ define < 2 x double > @test_x86_fma4_vfm
 }
 define < 2 x double > @test_x86_fma4_vfmadd_sd_load2(< 2 x double > %a0, double* %a1, < 2 x double > %a2) {
 ; CHECK-LABEL: test_x86_fma4_vfmadd_sd_load2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddsd %xmm1, (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load double , double *%a1
@@ -49,7 +49,7 @@ define < 2 x double > @test_x86_fma4_vfm
 declare < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double >, < 2 x double >, < 2 x double >) nounwind readnone
 define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float > %a1, < 4 x float >* %a2) {
 ; CHECK-LABEL: test_x86_fma_vfmadd_ps_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddps (%rdi), %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load <4 x float>, <4 x float>* %a2
@@ -58,7 +58,7 @@ define < 4 x float > @test_x86_fma_vfmad
 }
 define < 4 x float > @test_x86_fma_vfmadd_ps_load2(< 4 x float > %a0, < 4 x float >* %a1, < 4 x float > %a2) {
 ; CHECK-LABEL: test_x86_fma_vfmadd_ps_load2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load <4 x float>, <4 x float>* %a1
@@ -70,7 +70,7 @@ declare < 4 x float > @llvm.x86.fma.vfma
 ; To test execution dependency
 define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x float >* %a1, < 4 x float > %a2) {
 ; CHECK-LABEL: test_x86_fma_vfmadd_ps_load3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %xmm1
 ; CHECK-NEXT:    vfmaddps %xmm0, (%rsi), %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -82,7 +82,7 @@ define < 4 x float > @test_x86_fma_vfmad
 
 define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x double > %a1, < 2 x double >* %a2) {
 ; CHECK-LABEL: test_x86_fma_vfmadd_pd_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddpd (%rdi), %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load <2 x double>, <2 x double>* %a2
@@ -91,7 +91,7 @@ define < 2 x double > @test_x86_fma_vfma
 }
 define < 2 x double > @test_x86_fma_vfmadd_pd_load2(< 2 x double > %a0, < 2 x double >* %a1, < 2 x double > %a2) {
 ; CHECK-LABEL: test_x86_fma_vfmadd_pd_load2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfmaddpd %xmm1, (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x = load <2 x double>, <2 x double>* %a1
@@ -103,7 +103,7 @@ declare < 2 x double > @llvm.x86.fma.vfm
 ; To test execution dependency
 define < 2 x double > @test_x86_fma_vfmadd_pd_load3(< 2 x double >* %a0, < 2 x double >* %a1, < 2 x double > %a2) {
 ; CHECK-LABEL: test_x86_fma_vfmadd_pd_load3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovapd (%rdi), %xmm1
 ; CHECK-NEXT:    vfmaddpd %xmm0, (%rsi), %xmm1, %xmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fma4-scalar-memfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-scalar-memfold.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-scalar-memfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-scalar-memfold.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ declare <2 x double> @llvm.x86.fma4.vfma
 
 define void @fmadd_aab_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fmadd_aab_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfmaddss (%rsi), %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -34,7 +34,7 @@ define void @fmadd_aab_ss(float* %a, flo
 
 define void @fmadd_aba_ss(float* %a, float* %b) {
 ; CHECK-LABEL: fmadd_aba_ss:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vfmaddss %xmm0, (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
@@ -60,7 +60,7 @@ define void @fmadd_aba_ss(float* %a, flo
 
 define void @fmadd_aab_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fmadd_aab_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfmaddsd (%rsi), %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
@@ -82,7 +82,7 @@ define void @fmadd_aab_sd(double* %a, do
 
 define void @fmadd_aba_sd(double* %a, double* %b) {
 ; CHECK-LABEL: fmadd_aba_sd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vfmaddsd %xmm0, (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)

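The fma_patterns.ll diff below covers two kinds of RUN configurations. The plain FMA/FMA4/AVX512 prefixes check straightforward contraction of fmul+fadd pairs into a single FMA instruction. The *-INFS versus *-NOINFS prefixes (which, judging by the names, differ in whether a flag along the lines of -enable-no-infs-fp-math is passed) check that folds such as (x + 1.0) * y ==> x*y + y are only performed when infinities are excluded: the expected output keeps a separate vaddps/vmulps pair in the -INFS runs and a single vfmadd in the -NOINFS runs. A tiny illustration of why that fold is unsafe once infinities are allowed (using Python floats as stand-in IEEE doubles):

    # (x + 1) * y and x*y + y disagree when an infinity is involved.
    inf = float("inf")
    x, y = 0.0, inf
    print((x + 1.0) * y)  # inf: 1 * inf
    print(x * y + y)      # nan: 0 * inf is nan, and nan + inf stays nan
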
Modified: llvm/trunk/test/CodeGen/X86/fma_patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma_patterns.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma_patterns.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma_patterns.ll Mon Dec  4 09:18:51 2017
@@ -14,17 +14,17 @@
 
 define float @test_f32_fmadd(float %a0, float %a1, float %a2) {
 ; FMA-LABEL: test_f32_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f32_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f32_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul float %a0, %a1
@@ -34,17 +34,17 @@ define float @test_f32_fmadd(float %a0,
 
 define <4 x float> @test_4f32_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
 ; FMA-LABEL: test_4f32_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f32_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f32_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x float> %a0, %a1
@@ -54,17 +54,17 @@ define <4 x float> @test_4f32_fmadd(<4 x
 
 define <8 x float> @test_8f32_fmadd(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
 ; FMA-LABEL: test_8f32_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f32_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f32_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x float> %a0, %a1
@@ -74,17 +74,17 @@ define <8 x float> @test_8f32_fmadd(<8 x
 
 define double @test_f64_fmadd(double %a0, double %a1, double %a2) {
 ; FMA-LABEL: test_f64_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f64_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f64_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul double %a0, %a1
@@ -94,17 +94,17 @@ define double @test_f64_fmadd(double %a0
 
 define <2 x double> @test_2f64_fmadd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
 ; FMA-LABEL: test_2f64_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_2f64_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddpd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_2f64_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <2 x double> %a0, %a1
@@ -114,17 +114,17 @@ define <2 x double> @test_2f64_fmadd(<2
 
 define <4 x double> @test_4f64_fmadd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
 ; FMA-LABEL: test_4f64_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f64_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f64_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x double> %a0, %a1
@@ -138,17 +138,17 @@ define <4 x double> @test_4f64_fmadd(<4
 
 define float @test_f32_fmsub(float %a0, float %a1, float %a2) {
 ; FMA-LABEL: test_f32_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f32_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f32_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul float %a0, %a1
@@ -158,17 +158,17 @@ define float @test_f32_fmsub(float %a0,
 
 define <4 x float> @test_4f32_fmsub(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
 ; FMA-LABEL: test_4f32_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f32_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f32_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x float> %a0, %a1
@@ -178,17 +178,17 @@ define <4 x float> @test_4f32_fmsub(<4 x
 
 define <8 x float> @test_8f32_fmsub(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
 ; FMA-LABEL: test_8f32_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f32_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubps %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f32_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x float> %a0, %a1
@@ -198,17 +198,17 @@ define <8 x float> @test_8f32_fmsub(<8 x
 
 define double @test_f64_fmsub(double %a0, double %a1, double %a2) {
 ; FMA-LABEL: test_f64_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f64_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f64_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul double %a0, %a1
@@ -218,17 +218,17 @@ define double @test_f64_fmsub(double %a0
 
 define <2 x double> @test_2f64_fmsub(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
 ; FMA-LABEL: test_2f64_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_2f64_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubpd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_2f64_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <2 x double> %a0, %a1
@@ -238,17 +238,17 @@ define <2 x double> @test_2f64_fmsub(<2
 
 define <4 x double> @test_4f64_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
 ; FMA-LABEL: test_4f64_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f64_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f64_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x double> %a0, %a1
@@ -262,17 +262,17 @@ define <4 x double> @test_4f64_fmsub(<4
 
 define float @test_f32_fnmadd(float %a0, float %a1, float %a2) {
 ; FMA-LABEL: test_f32_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f32_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f32_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul float %a0, %a1
@@ -282,17 +282,17 @@ define float @test_f32_fnmadd(float %a0,
 
 define <4 x float> @test_4f32_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
 ; FMA-LABEL: test_4f32_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f32_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f32_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x float> %a0, %a1
@@ -302,17 +302,17 @@ define <4 x float> @test_4f32_fnmadd(<4
 
 define <8 x float> @test_8f32_fnmadd(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
 ; FMA-LABEL: test_8f32_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f32_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f32_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x float> %a0, %a1
@@ -322,17 +322,17 @@ define <8 x float> @test_8f32_fnmadd(<8
 
 define double @test_f64_fnmadd(double %a0, double %a1, double %a2) {
 ; FMA-LABEL: test_f64_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f64_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f64_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul double %a0, %a1
@@ -342,17 +342,17 @@ define double @test_f64_fnmadd(double %a
 
 define <2 x double> @test_2f64_fnmadd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
 ; FMA-LABEL: test_2f64_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_2f64_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddpd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_2f64_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <2 x double> %a0, %a1
@@ -362,17 +362,17 @@ define <2 x double> @test_2f64_fnmadd(<2
 
 define <4 x double> @test_4f64_fnmadd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
 ; FMA-LABEL: test_4f64_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f64_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f64_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x double> %a0, %a1
@@ -386,17 +386,17 @@ define <4 x double> @test_4f64_fnmadd(<4
 
 define float @test_f32_fnmsub(float %a0, float %a1, float %a2) {
 ; FMA-LABEL: test_f32_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f32_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f32_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul float %a0, %a1
@@ -407,17 +407,17 @@ define float @test_f32_fnmsub(float %a0,
 
 define <4 x float> @test_4f32_fnmsub(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
 ; FMA-LABEL: test_4f32_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f32_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f32_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x float> %a0, %a1
@@ -428,17 +428,17 @@ define <4 x float> @test_4f32_fnmsub(<4
 
 define <8 x float> @test_8f32_fnmsub(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
 ; FMA-LABEL: test_8f32_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f32_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubps %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f32_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x float> %a0, %a1
@@ -449,17 +449,17 @@ define <8 x float> @test_8f32_fnmsub(<8
 
 define double @test_f64_fnmsub(double %a0, double %a1, double %a2) {
 ; FMA-LABEL: test_f64_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f64_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f64_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul double %a0, %a1
@@ -470,17 +470,17 @@ define double @test_f64_fnmsub(double %a
 
 define <2 x double> @test_2f64_fnmsub(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
 ; FMA-LABEL: test_2f64_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_2f64_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubpd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_2f64_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = fmul <2 x double> %a0, %a1
@@ -491,17 +491,17 @@ define <2 x double> @test_2f64_fnmsub(<2
 
 define <4 x double> @test_4f64_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
 ; FMA-LABEL: test_4f64_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f64_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f64_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %x = fmul <4 x double> %a0, %a1
@@ -516,17 +516,17 @@ define <4 x double> @test_4f64_fnmsub(<4
 
 define <4 x float> @test_4f32_fmadd_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) {
 ; FMA-LABEL: test_4f32_fmadd_load:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_4f32_fmadd_load:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_4f32_fmadd_load:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = load <4 x float>, <4 x float>* %a0
@@ -537,17 +537,17 @@ define <4 x float> @test_4f32_fmadd_load
 
 define <2 x double> @test_2f64_fmsub_load(<2 x double>* %a0, <2 x double> %a1, <2 x double> %a2) {
 ; FMA-LABEL: test_2f64_fmsub_load:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_2f64_fmsub_load:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubpd %xmm1, (%rdi), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_2f64_fmsub_load:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %x = load <2 x double>, <2 x double>* %a0
@@ -562,35 +562,35 @@ define <2 x double> @test_2f64_fmsub_loa
 
 define <4 x float> @test_v4f32_mul_add_x_one_y(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_add_x_one_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_add_x_one_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_add_x_one_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -600,35 +600,35 @@ define <4 x float> @test_v4f32_mul_add_x
 
 define <4 x float> @test_v4f32_mul_y_add_x_one(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_y_add_x_one:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_y_add_x_one:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_y_add_x_one:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -638,35 +638,35 @@ define <4 x float> @test_v4f32_mul_y_add
 
 define <4 x float> @test_v4f32_mul_add_x_negone_y(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_add_x_negone_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -676,35 +676,35 @@ define <4 x float> @test_v4f32_mul_add_x
 
 define <4 x float> @test_v4f32_mul_y_add_x_negone(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_y_add_x_negone:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -714,38 +714,38 @@ define <4 x float> @test_v4f32_mul_y_add
 
 define <4 x float> @test_v4f32_mul_sub_one_x_y(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_one_x_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -755,38 +755,38 @@ define <4 x float> @test_v4f32_mul_sub_o
 
 define <4 x float> @test_v4f32_mul_y_sub_one_x(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_one_x:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -796,38 +796,38 @@ define <4 x float> @test_v4f32_mul_y_sub
 
 define <4 x float> @test_v4f32_mul_sub_negone_x_y(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-1,-1,-1,-1]
 ; AVX512-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmsub213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmsubps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_negone_x_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmsub213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
@@ -837,38 +837,38 @@ define <4 x float> @test_v4f32_mul_sub_n
 
 define <4 x float> @test_v4f32_mul_y_sub_negone_x(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-1,-1,-1,-1]
 ; AVX512-INFS-NEXT:    vsubps %xmm0, %xmm2, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmsub213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmsubps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_negone_x:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmsub213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
@@ -878,35 +878,35 @@ define <4 x float> @test_v4f32_mul_y_sub
 
 define <4 x float> @test_v4f32_mul_sub_x_one_y(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_x_one_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -916,35 +916,35 @@ define <4 x float> @test_v4f32_mul_sub_x
 
 define <4 x float> @test_v4f32_mul_y_sub_x_one(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_x_one:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -954,35 +954,35 @@ define <4 x float> @test_v4f32_mul_y_sub
 
 define <4 x float> @test_v4f32_mul_sub_x_negone_y(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_sub_x_negone_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -992,35 +992,35 @@ define <4 x float> @test_v4f32_mul_sub_x
 
 define <4 x float> @test_v4f32_mul_y_sub_x_negone(<4 x float> %x, <4 x float> %y) {
 ; FMA-INFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vsubps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-INFS-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_mul_y_sub_x_negone:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm1, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
@@ -1034,7 +1034,7 @@ define <4 x float> @test_v4f32_mul_y_sub
 
 define float @test_f32_interp(float %x, float %y, float %t) {
 ; FMA-INFS-LABEL: test_f32_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
 ; FMA-INFS-NEXT:    vsubss %xmm2, %xmm3, %xmm3
 ; FMA-INFS-NEXT:    vmulss %xmm3, %xmm1, %xmm1
@@ -1042,7 +1042,7 @@ define float @test_f32_interp(float %x,
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_f32_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
 ; FMA4-INFS-NEXT:    vsubss %xmm2, %xmm3, %xmm3
 ; FMA4-INFS-NEXT:    vmulss %xmm3, %xmm1, %xmm1
@@ -1050,7 +1050,7 @@ define float @test_f32_interp(float %x,
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_f32_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
 ; AVX512-INFS-NEXT:    vsubss %xmm2, %xmm3, %xmm3
 ; AVX512-INFS-NEXT:    vmulss %xmm3, %xmm1, %xmm1
@@ -1058,19 +1058,19 @@ define float @test_f32_interp(float %x,
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_f32_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ss %xmm1, %xmm2, %xmm1
 ; FMA-NOINFS-NEXT:    vfmadd213ss %xmm1, %xmm2, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_f32_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddss %xmm1, %xmm1, %xmm2, %xmm1
 ; FMA4-NOINFS-NEXT:    vfmaddss %xmm1, %xmm2, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_f32_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ss %xmm1, %xmm2, %xmm1
 ; AVX512-NOINFS-NEXT:    vfmadd213ss %xmm1, %xmm2, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -1083,7 +1083,7 @@ define float @test_f32_interp(float %x,
 
 define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float> %t) {
 ; FMA-INFS-LABEL: test_v4f32_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %xmm2, %xmm3, %xmm3
 ; FMA-INFS-NEXT:    vmulps %xmm3, %xmm1, %xmm1
@@ -1091,7 +1091,7 @@ define <4 x float> @test_v4f32_interp(<4
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f32_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %xmm2, %xmm3, %xmm3
 ; FMA4-INFS-NEXT:    vmulps %xmm3, %xmm1, %xmm1
@@ -1099,7 +1099,7 @@ define <4 x float> @test_v4f32_interp(<4
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f32_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubps %xmm2, %xmm3, %xmm3
 ; AVX512-INFS-NEXT:    vmulps %xmm3, %xmm1, %xmm1
@@ -1107,19 +1107,19 @@ define <4 x float> @test_v4f32_interp(<4
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f32_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %xmm1, %xmm2, %xmm1
 ; FMA-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm2, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f32_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddps %xmm1, %xmm1, %xmm2, %xmm1
 ; FMA4-NOINFS-NEXT:    vfmaddps %xmm1, %xmm2, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f32_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ps %xmm1, %xmm2, %xmm1
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %xmm1, %xmm2, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -1132,7 +1132,7 @@ define <4 x float> @test_v4f32_interp(<4
 
 define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float> %t) {
 ; FMA-INFS-LABEL: test_v8f32_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %ymm2, %ymm3, %ymm3
 ; FMA-INFS-NEXT:    vmulps %ymm3, %ymm1, %ymm1
@@ -1140,7 +1140,7 @@ define <8 x float> @test_v8f32_interp(<8
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f32_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %ymm2, %ymm3, %ymm3
 ; FMA4-INFS-NEXT:    vmulps %ymm3, %ymm1, %ymm1
@@ -1148,7 +1148,7 @@ define <8 x float> @test_v8f32_interp(<8
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f32_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubps %ymm2, %ymm3, %ymm3
 ; AVX512-INFS-NEXT:    vmulps %ymm3, %ymm1, %ymm1
@@ -1156,19 +1156,19 @@ define <8 x float> @test_v8f32_interp(<8
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f32_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %ymm1, %ymm2, %ymm1
 ; FMA-NOINFS-NEXT:    vfmadd213ps %ymm1, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f32_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddps %ymm1, %ymm1, %ymm2, %ymm1
 ; FMA4-NOINFS-NEXT:    vfmaddps %ymm1, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f32_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ps %ymm1, %ymm2, %ymm1
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %ymm1, %ymm2, %ymm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -1181,7 +1181,7 @@ define <8 x float> @test_v8f32_interp(<8
 
 define double @test_f64_interp(double %x, double %y, double %t) {
 ; FMA-INFS-LABEL: test_f64_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; FMA-INFS-NEXT:    vsubsd %xmm2, %xmm3, %xmm3
 ; FMA-INFS-NEXT:    vmulsd %xmm3, %xmm1, %xmm1
@@ -1189,7 +1189,7 @@ define double @test_f64_interp(double %x
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_f64_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; FMA4-INFS-NEXT:    vsubsd %xmm2, %xmm3, %xmm3
 ; FMA4-INFS-NEXT:    vmulsd %xmm3, %xmm1, %xmm1
@@ -1197,7 +1197,7 @@ define double @test_f64_interp(double %x
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_f64_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; AVX512-INFS-NEXT:    vsubsd %xmm2, %xmm3, %xmm3
 ; AVX512-INFS-NEXT:    vmulsd %xmm3, %xmm1, %xmm1
@@ -1205,19 +1205,19 @@ define double @test_f64_interp(double %x
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_f64_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213sd %xmm1, %xmm2, %xmm1
 ; FMA-NOINFS-NEXT:    vfmadd213sd %xmm1, %xmm2, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_f64_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddsd %xmm1, %xmm1, %xmm2, %xmm1
 ; FMA4-NOINFS-NEXT:    vfmaddsd %xmm1, %xmm2, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_f64_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213sd %xmm1, %xmm2, %xmm1
 ; AVX512-NOINFS-NEXT:    vfmadd213sd %xmm1, %xmm2, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -1230,7 +1230,7 @@ define double @test_f64_interp(double %x
 
 define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x double> %t) {
 ; FMA-INFS-LABEL: test_v2f64_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %xmm2, %xmm3, %xmm3
 ; FMA-INFS-NEXT:    vmulpd %xmm3, %xmm1, %xmm1
@@ -1238,7 +1238,7 @@ define <2 x double> @test_v2f64_interp(<
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v2f64_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %xmm2, %xmm3, %xmm3
 ; FMA4-INFS-NEXT:    vmulpd %xmm3, %xmm1, %xmm1
@@ -1246,7 +1246,7 @@ define <2 x double> @test_v2f64_interp(<
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v2f64_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
 ; AVX512-INFS-NEXT:    vsubpd %xmm2, %xmm3, %xmm3
 ; AVX512-INFS-NEXT:    vmulpd %xmm3, %xmm1, %xmm1
@@ -1254,19 +1254,19 @@ define <2 x double> @test_v2f64_interp(<
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v2f64_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213pd %xmm1, %xmm2, %xmm1
 ; FMA-NOINFS-NEXT:    vfmadd213pd %xmm1, %xmm2, %xmm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v2f64_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddpd %xmm1, %xmm1, %xmm2, %xmm1
 ; FMA4-NOINFS-NEXT:    vfmaddpd %xmm1, %xmm2, %xmm0, %xmm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v2f64_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213pd %xmm1, %xmm2, %xmm1
 ; AVX512-NOINFS-NEXT:    vfmadd213pd %xmm1, %xmm2, %xmm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -1279,7 +1279,7 @@ define <2 x double> @test_v2f64_interp(<
 
 define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x double> %t) {
 ; FMA-INFS-LABEL: test_v4f64_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %ymm2, %ymm3, %ymm3
 ; FMA-INFS-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
@@ -1287,7 +1287,7 @@ define <4 x double> @test_v4f64_interp(<
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v4f64_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %ymm2, %ymm3, %ymm3
 ; FMA4-INFS-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
@@ -1295,7 +1295,7 @@ define <4 x double> @test_v4f64_interp(<
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v4f64_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastsd {{.*#+}} ymm3 = [1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubpd %ymm2, %ymm3, %ymm3
 ; AVX512-INFS-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
@@ -1303,19 +1303,19 @@ define <4 x double> @test_v4f64_interp(<
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v4f64_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213pd %ymm1, %ymm2, %ymm1
 ; FMA-NOINFS-NEXT:    vfmadd213pd %ymm1, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v4f64_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddpd %ymm1, %ymm1, %ymm2, %ymm1
 ; FMA4-NOINFS-NEXT:    vfmaddpd %ymm1, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v4f64_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213pd %ymm1, %ymm2, %ymm1
 ; AVX512-NOINFS-NEXT:    vfmadd213pd %ymm1, %ymm2, %ymm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -1332,17 +1332,17 @@ define <4 x double> @test_v4f64_interp(<
 
 define <4 x float> @test_v4f32_fneg_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; FMA-LABEL: test_v4f32_fneg_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f32_fneg_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32_fneg_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %mul = fmul <4 x float> %a0, %a1
@@ -1353,17 +1353,17 @@ define <4 x float> @test_v4f32_fneg_fmad
 
 define <4 x double> @test_v4f64_fneg_fmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; FMA-LABEL: test_v4f64_fneg_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f64_fneg_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %mul = fmul <4 x double> %a0, %a1
@@ -1374,17 +1374,17 @@ define <4 x double> @test_v4f64_fneg_fms
 
 define <4 x float> @test_v4f32_fneg_fnmadd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; FMA-LABEL: test_v4f32_fneg_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f32_fneg_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32_fneg_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %mul = fmul <4 x float> %a0, %a1
@@ -1396,17 +1396,17 @@ define <4 x float> @test_v4f32_fneg_fnma
 
 define <4 x double> @test_v4f64_fneg_fnmsub(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; FMA-LABEL: test_v4f64_fneg_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f64_fneg_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %mul = fmul <4 x double> %a0, %a1
@@ -1422,17 +1422,17 @@ define <4 x double> @test_v4f64_fneg_fnm
 
 define <4 x float> @test_v4f32_fma_x_c1_fmul_x_c2(<4 x float> %x) #0 {
 ; FMA-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32_fma_x_c1_fmul_x_c2:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmulps {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %m0 = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -1447,17 +1447,17 @@ define <4 x float> @test_v4f32_fma_x_c1_
 
 define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y) #0 {
 ; FMA-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd132ps {{.*}}(%rip), %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %xmm1, {{.*}}(%rip), %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32_fma_fmul_x_c1_c2_y:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd132ps {{.*}}(%rip), %xmm1, %xmm0
 ; AVX512-NEXT:    retq
   %m0 = fmul <4 x float> %x,  <float 1.0, float 2.0, float 3.0, float 4.0>
@@ -1470,19 +1470,19 @@ define <4 x float> @test_v4f32_fma_fmul_
 
 define double @test_f64_fneg_fmul(double %x, double %y) #0 {
 ; FMA-LABEL: test_f64_fneg_fmul:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; FMA-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_f64_fneg_fmul:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; FMA4-NEXT:    vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_f64_fneg_fmul:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
@@ -1493,19 +1493,19 @@ define double @test_f64_fneg_fmul(double
 
 define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
 ; FMA-LABEL: test_v4f32_fneg_fmul:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; FMA-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f32_fneg_fmul:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; FMA4-NEXT:    vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32_fneg_fmul:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
 ; AVX512-NEXT:    retq
@@ -1516,19 +1516,19 @@ define <4 x float> @test_v4f32_fneg_fmul
 
 define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
 ; FMA-LABEL: test_v4f64_fneg_fmul:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; FMA-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f64_fneg_fmul:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; FMA4-NEXT:    vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fmul:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
@@ -1539,19 +1539,19 @@ define <4 x double> @test_v4f64_fneg_fmu
 
 define <4 x double> @test_v4f64_fneg_fmul_no_nsz(<4 x double> %x, <4 x double> %y) #0 {
 ; FMA-LABEL: test_v4f64_fneg_fmul_no_nsz:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; FMA-NEXT:    vxorpd {{.*}}(%rip), %ymm0, %ymm0
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v4f64_fneg_fmul_no_nsz:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    vxorpd {{.*}}(%rip), %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64_fneg_fmul_no_nsz:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vxorpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
 ; AVX512-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma_patterns_wide.ll Mon Dec  4 09:18:51 2017
@@ -14,19 +14,19 @@
 
 define <16 x float> @test_16f32_fmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; FMA-LABEL: test_16f32_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfmadd213ps %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_16f32_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_16f32_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <16 x float> %a0, %a1
@@ -36,19 +36,19 @@ define <16 x float> @test_16f32_fmadd(<1
 
 define <8 x double> @test_8f64_fmadd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; FMA-LABEL: test_8f64_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfmadd213pd %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f64_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f64_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x double> %a0, %a1
@@ -62,19 +62,19 @@ define <8 x double> @test_8f64_fmadd(<8
 
 define <16 x float> @test_16f32_fmsub(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; FMA-LABEL: test_16f32_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfmsub213ps %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_16f32_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmsubps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_16f32_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <16 x float> %a0, %a1
@@ -84,19 +84,19 @@ define <16 x float> @test_16f32_fmsub(<1
 
 define <8 x double> @test_8f64_fmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; FMA-LABEL: test_8f64_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfmsub213pd %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f64_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmsubpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f64_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x double> %a0, %a1
@@ -110,19 +110,19 @@ define <8 x double> @test_8f64_fmsub(<8
 
 define <16 x float> @test_16f32_fnmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; FMA-LABEL: test_16f32_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmadd213ps %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_16f32_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmaddps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_16f32_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <16 x float> %a0, %a1
@@ -132,19 +132,19 @@ define <16 x float> @test_16f32_fnmadd(<
 
 define <8 x double> @test_8f64_fnmadd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; FMA-LABEL: test_8f64_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmadd213pd %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f64_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmaddpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f64_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x double> %a0, %a1
@@ -158,19 +158,19 @@ define <8 x double> @test_8f64_fnmadd(<8
 
 define <16 x float> @test_16f32_fnmsub(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; FMA-LABEL: test_16f32_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmsub213ps %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_16f32_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmsubps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_16f32_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <16 x float> %a0, %a1
@@ -181,19 +181,19 @@ define <16 x float> @test_16f32_fnmsub(<
 
 define <8 x double> @test_8f64_fnmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; FMA-LABEL: test_8f64_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmsub213pd %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f64_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmsubpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f64_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = fmul <8 x double> %a0, %a1
@@ -208,19 +208,19 @@ define <8 x double> @test_8f64_fnmsub(<8
 
 define <16 x float> @test_16f32_fmadd_load(<16 x float>* %a0, <16 x float> %a1, <16 x float> %a2) {
 ; FMA-LABEL: test_16f32_fmadd_load:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd132ps (%rdi), %ymm2, %ymm0
 ; FMA-NEXT:    vfmadd132ps 32(%rdi), %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_16f32_fmadd_load:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %ymm2, (%rdi), %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddps %ymm3, 32(%rdi), %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_16f32_fmadd_load:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd132ps (%rdi), %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = load <16 x float>, <16 x float>* %a0
@@ -231,19 +231,19 @@ define <16 x float> @test_16f32_fmadd_lo
 
 define <8 x double> @test_8f64_fmsub_load(<8 x double>* %a0, <8 x double> %a1, <8 x double> %a2) {
 ; FMA-LABEL: test_8f64_fmsub_load:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub132pd (%rdi), %ymm2, %ymm0
 ; FMA-NEXT:    vfmsub132pd 32(%rdi), %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_8f64_fmsub_load:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubpd %ymm2, (%rdi), %ymm0, %ymm0
 ; FMA4-NEXT:    vfmsubpd %ymm3, 32(%rdi), %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_8f64_fmsub_load:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub132pd (%rdi), %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %x = load <8 x double>, <8 x double>* %a0
@@ -258,7 +258,7 @@ define <8 x double> @test_8f64_fmsub_loa
 
 define <16 x float> @test_v16f32_mul_add_x_one_y(<16 x float> %x, <16 x float> %y) {
 ; FMA-INFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vaddps %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vaddps %ymm4, %ymm0, %ymm0
@@ -267,7 +267,7 @@ define <16 x float> @test_v16f32_mul_add
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vaddps %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vaddps %ymm4, %ymm0, %ymm0
@@ -276,25 +276,25 @@ define <16 x float> @test_v16f32_mul_add
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_mul_add_x_one_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulps %zmm1, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213ps %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmadd213ps %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_mul_add_x_one_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddps %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmaddps %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_mul_add_x_one_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <16 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
@@ -304,7 +304,7 @@ define <16 x float> @test_v16f32_mul_add
 
 define <8 x double> @test_v8f64_mul_y_add_x_one(<8 x double> %x, <8 x double> %y) {
 ; FMA-INFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vaddpd %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
@@ -313,7 +313,7 @@ define <8 x double> @test_v8f64_mul_y_ad
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vaddpd %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
@@ -322,25 +322,25 @@ define <8 x double> @test_v8f64_mul_y_ad
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_mul_y_add_x_one:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulpd %zmm0, %zmm1, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213pd %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmadd213pd %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_mul_y_add_x_one:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddpd %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmaddpd %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_mul_y_add_x_one:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213pd %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <8 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
@@ -350,7 +350,7 @@ define <8 x double> @test_v8f64_mul_y_ad
 
 define <16 x float> @test_v16f32_mul_add_x_negone_y(<16 x float> %x, <16 x float> %y) {
 ; FMA-INFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vaddps %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vaddps %ymm4, %ymm0, %ymm0
@@ -359,7 +359,7 @@ define <16 x float> @test_v16f32_mul_add
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vaddps %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vaddps %ymm4, %ymm0, %ymm0
@@ -368,25 +368,25 @@ define <16 x float> @test_v16f32_mul_add
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulps %zmm1, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213ps %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmsub213ps %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubps %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmsubps %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_mul_add_x_negone_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213ps %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <16 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>
@@ -396,7 +396,7 @@ define <16 x float> @test_v16f32_mul_add
 
 define <8 x double> @test_v8f64_mul_y_add_x_negone(<8 x double> %x, <8 x double> %y) {
 ; FMA-INFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vaddpd %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
@@ -405,7 +405,7 @@ define <8 x double> @test_v8f64_mul_y_ad
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vaddpd %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
@@ -414,25 +414,25 @@ define <8 x double> @test_v8f64_mul_y_ad
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulpd %zmm0, %zmm1, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213pd %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmsub213pd %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubpd %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmsubpd %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_mul_y_add_x_negone:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213pd %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %a = fadd <8 x double> %x, <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>
@@ -442,7 +442,7 @@ define <8 x double> @test_v8f64_mul_y_ad
 
 define <16 x float> @test_v16f32_mul_sub_one_x_y(<16 x float> %x, <16 x float> %y) {
 ; FMA-INFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %ymm1, %ymm4, %ymm1
 ; FMA-INFS-NEXT:    vsubps %ymm0, %ymm4, %ymm0
@@ -451,7 +451,7 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %ymm1, %ymm4, %ymm1
 ; FMA4-INFS-NEXT:    vsubps %ymm0, %ymm4, %ymm0
@@ -460,26 +460,26 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} zmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubps %zmm0, %zmm2, %zmm0
 ; AVX512-INFS-NEXT:    vmulps %zmm1, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddps %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfnmaddps %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_one_x_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ps %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
@@ -489,7 +489,7 @@ define <16 x float> @test_v16f32_mul_sub
 
 define <8 x double> @test_v8f64_mul_y_sub_one_x(<8 x double> %x, <8 x double> %y) {
 ; FMA-INFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %ymm1, %ymm4, %ymm1
 ; FMA-INFS-NEXT:    vsubpd %ymm0, %ymm4, %ymm0
@@ -498,7 +498,7 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %ymm1, %ymm4, %ymm1
 ; FMA4-INFS-NEXT:    vsubpd %ymm0, %ymm4, %ymm0
@@ -507,26 +507,26 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastsd {{.*#+}} zmm2 = [1,1,1,1,1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubpd %zmm0, %zmm2, %zmm0
 ; AVX512-INFS-NEXT:    vmulpd %zmm0, %zmm1, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213pd %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfnmadd213pd %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddpd %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfnmaddpd %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_one_x:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213pd %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <8 x double> <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>, %x
@@ -536,7 +536,7 @@ define <8 x double> @test_v8f64_mul_y_su
 
 define <16 x float> @test_v16f32_mul_sub_negone_x_y(<16 x float> %x, <16 x float> %y) {
 ; FMA-INFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %ymm1, %ymm4, %ymm1
 ; FMA-INFS-NEXT:    vsubps %ymm0, %ymm4, %ymm0
@@ -545,7 +545,7 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %ymm1, %ymm4, %ymm1
 ; FMA4-INFS-NEXT:    vsubps %ymm0, %ymm4, %ymm0
@@ -554,26 +554,26 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} zmm2 = [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]
 ; AVX512-INFS-NEXT:    vsubps %zmm0, %zmm2, %zmm0
 ; AVX512-INFS-NEXT:    vmulps %zmm1, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmsub213ps %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfnmsub213ps %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmsubps %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfnmsubps %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_negone_x_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmsub213ps %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <16 x float> <float -1.0, float -1.0, float -1.0, float -1.0,float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>, %x
@@ -583,7 +583,7 @@ define <16 x float> @test_v16f32_mul_sub
 
 define <8 x double> @test_v8f64_mul_y_sub_negone_x(<8 x double> %x, <8 x double> %y) {
 ; FMA-INFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %ymm1, %ymm4, %ymm1
 ; FMA-INFS-NEXT:    vsubpd %ymm0, %ymm4, %ymm0
@@ -592,7 +592,7 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %ymm1, %ymm4, %ymm1
 ; FMA4-INFS-NEXT:    vsubpd %ymm0, %ymm4, %ymm0
@@ -601,26 +601,26 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastsd {{.*#+}} zmm2 = [-1,-1,-1,-1,-1,-1,-1,-1]
 ; AVX512-INFS-NEXT:    vsubpd %zmm0, %zmm2, %zmm0
 ; AVX512-INFS-NEXT:    vmulpd %zmm0, %zmm1, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmsub213pd %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfnmsub213pd %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmsubpd %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfnmsubpd %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_negone_x:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmsub213pd %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <8 x double> <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>, %x
@@ -630,7 +630,7 @@ define <8 x double> @test_v8f64_mul_y_su
 
 define <16 x float> @test_v16f32_mul_sub_x_one_y(<16 x float> %x, <16 x float> %y) {
 ; FMA-INFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vsubps %ymm4, %ymm0, %ymm0
@@ -639,7 +639,7 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vsubps %ymm4, %ymm0, %ymm0
@@ -648,25 +648,25 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubps {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulps %zmm1, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213ps %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmsub213ps %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubps %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmsubps %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_x_one_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213ps %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <16 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
@@ -676,7 +676,7 @@ define <16 x float> @test_v16f32_mul_sub
 
 define <8 x double> @test_v8f64_mul_y_sub_x_one(<8 x double> %x, <8 x double> %y) {
 ; FMA-INFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vsubpd %ymm4, %ymm0, %ymm0
@@ -685,7 +685,7 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vsubpd %ymm4, %ymm0, %ymm0
@@ -694,25 +694,25 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulpd %zmm0, %zmm1, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmsub213pd %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmsub213pd %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmsubpd %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmsubpd %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_x_one:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmsub213pd %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <8 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
@@ -722,7 +722,7 @@ define <8 x double> @test_v8f64_mul_y_su
 
 define <16 x float> @test_v16f32_mul_sub_x_negone_y(<16 x float> %x, <16 x float> %y) {
 ; FMA-INFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vsubps %ymm4, %ymm0, %ymm0
@@ -731,7 +731,7 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vsubps %ymm4, %ymm0, %ymm0
@@ -740,25 +740,25 @@ define <16 x float> @test_v16f32_mul_sub
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubps {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulps %zmm1, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213ps %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmadd213ps %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddps %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmaddps %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_mul_sub_x_negone_y:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <16 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>
@@ -768,7 +768,7 @@ define <16 x float> @test_v16f32_mul_sub
 
 define <8 x double> @test_v8f64_mul_y_sub_x_negone(<8 x double> %x, <8 x double> %y) {
 ; FMA-INFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %ymm4, %ymm1, %ymm1
 ; FMA-INFS-NEXT:    vsubpd %ymm4, %ymm0, %ymm0
@@ -777,7 +777,7 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %ymm4, %ymm1, %ymm1
 ; FMA4-INFS-NEXT:    vsubpd %ymm4, %ymm0, %ymm0
@@ -786,25 +786,25 @@ define <8 x double> @test_v8f64_mul_y_su
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vsubpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; AVX512-INFS-NEXT:    vmulpd %zmm0, %zmm1, %zmm0
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfmadd213pd %ymm2, %ymm2, %ymm0
 ; FMA-NOINFS-NEXT:    vfmadd213pd %ymm3, %ymm3, %ymm1
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfmaddpd %ymm2, %ymm2, %ymm0, %ymm0
 ; FMA4-NOINFS-NEXT:    vfmaddpd %ymm3, %ymm3, %ymm1, %ymm1
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_mul_y_sub_x_negone:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfmadd213pd %zmm1, %zmm1, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
   %s = fsub <8 x double> %x, <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>
@@ -818,7 +818,7 @@ define <8 x double> @test_v8f64_mul_y_su
 
 define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x float> %t) {
 ; FMA-INFS-LABEL: test_v16f32_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovaps {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubps %ymm4, %ymm6, %ymm7
 ; FMA-INFS-NEXT:    vsubps %ymm5, %ymm6, %ymm6
@@ -829,7 +829,7 @@ define <16 x float> @test_v16f32_interp(
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v16f32_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovaps {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubps %ymm4, %ymm6, %ymm7
 ; FMA4-INFS-NEXT:    vsubps %ymm5, %ymm6, %ymm6
@@ -840,7 +840,7 @@ define <16 x float> @test_v16f32_interp(
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v16f32_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastss {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubps %zmm2, %zmm3, %zmm3
 ; AVX512-INFS-NEXT:    vmulps %zmm3, %zmm1, %zmm1
@@ -848,7 +848,7 @@ define <16 x float> @test_v16f32_interp(
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v16f32_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %ymm3, %ymm5, %ymm3
 ; FMA-NOINFS-NEXT:    vfnmadd213ps %ymm2, %ymm4, %ymm2
 ; FMA-NOINFS-NEXT:    vfmadd213ps %ymm2, %ymm4, %ymm0
@@ -856,7 +856,7 @@ define <16 x float> @test_v16f32_interp(
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v16f32_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddps %ymm3, %ymm3, %ymm5, %ymm3
 ; FMA4-NOINFS-NEXT:    vfnmaddps %ymm2, %ymm2, %ymm4, %ymm2
 ; FMA4-NOINFS-NEXT:    vfmaddps %ymm2, %ymm4, %ymm0, %ymm0
@@ -864,7 +864,7 @@ define <16 x float> @test_v16f32_interp(
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v16f32_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213ps %zmm1, %zmm2, %zmm1
 ; AVX512-NOINFS-NEXT:    vfmadd213ps %zmm1, %zmm2, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -877,7 +877,7 @@ define <16 x float> @test_v16f32_interp(
 
 define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x double> %t) {
 ; FMA-INFS-LABEL: test_v8f64_interp:
-; FMA-INFS:       # BB#0:
+; FMA-INFS:       # %bb.0:
 ; FMA-INFS-NEXT:    vmovapd {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-INFS-NEXT:    vsubpd %ymm4, %ymm6, %ymm7
 ; FMA-INFS-NEXT:    vsubpd %ymm5, %ymm6, %ymm6
@@ -888,7 +888,7 @@ define <8 x double> @test_v8f64_interp(<
 ; FMA-INFS-NEXT:    retq
 ;
 ; FMA4-INFS-LABEL: test_v8f64_interp:
-; FMA4-INFS:       # BB#0:
+; FMA4-INFS:       # %bb.0:
 ; FMA4-INFS-NEXT:    vmovapd {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA4-INFS-NEXT:    vsubpd %ymm4, %ymm6, %ymm7
 ; FMA4-INFS-NEXT:    vsubpd %ymm5, %ymm6, %ymm6
@@ -899,7 +899,7 @@ define <8 x double> @test_v8f64_interp(<
 ; FMA4-INFS-NEXT:    retq
 ;
 ; AVX512-INFS-LABEL: test_v8f64_interp:
-; AVX512-INFS:       # BB#0:
+; AVX512-INFS:       # %bb.0:
 ; AVX512-INFS-NEXT:    vbroadcastsd {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
 ; AVX512-INFS-NEXT:    vsubpd %zmm2, %zmm3, %zmm3
 ; AVX512-INFS-NEXT:    vmulpd %zmm3, %zmm1, %zmm1
@@ -907,7 +907,7 @@ define <8 x double> @test_v8f64_interp(<
 ; AVX512-INFS-NEXT:    retq
 ;
 ; FMA-NOINFS-LABEL: test_v8f64_interp:
-; FMA-NOINFS:       # BB#0:
+; FMA-NOINFS:       # %bb.0:
 ; FMA-NOINFS-NEXT:    vfnmadd213pd %ymm3, %ymm5, %ymm3
 ; FMA-NOINFS-NEXT:    vfnmadd213pd %ymm2, %ymm4, %ymm2
 ; FMA-NOINFS-NEXT:    vfmadd213pd %ymm2, %ymm4, %ymm0
@@ -915,7 +915,7 @@ define <8 x double> @test_v8f64_interp(<
 ; FMA-NOINFS-NEXT:    retq
 ;
 ; FMA4-NOINFS-LABEL: test_v8f64_interp:
-; FMA4-NOINFS:       # BB#0:
+; FMA4-NOINFS:       # %bb.0:
 ; FMA4-NOINFS-NEXT:    vfnmaddpd %ymm3, %ymm3, %ymm5, %ymm3
 ; FMA4-NOINFS-NEXT:    vfnmaddpd %ymm2, %ymm2, %ymm4, %ymm2
 ; FMA4-NOINFS-NEXT:    vfmaddpd %ymm2, %ymm4, %ymm0, %ymm0
@@ -923,7 +923,7 @@ define <8 x double> @test_v8f64_interp(<
 ; FMA4-NOINFS-NEXT:    retq
 ;
 ; AVX512-NOINFS-LABEL: test_v8f64_interp:
-; AVX512-NOINFS:       # BB#0:
+; AVX512-NOINFS:       # %bb.0:
 ; AVX512-NOINFS-NEXT:    vfnmadd213pd %zmm1, %zmm2, %zmm1
 ; AVX512-NOINFS-NEXT:    vfmadd213pd %zmm1, %zmm2, %zmm0
 ; AVX512-NOINFS-NEXT:    retq
@@ -940,19 +940,19 @@ define <8 x double> @test_v8f64_interp(<
 
 define <16 x float> @test_v16f32_fneg_fmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) #0 {
 ; FMA-LABEL: test_v16f32_fneg_fmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmsub213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmsub213ps %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v16f32_fneg_fmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmsubps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32_fneg_fmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %mul = fmul <16 x float> %a0, %a1
@@ -963,19 +963,19 @@ define <16 x float> @test_v16f32_fneg_fm
 
 define <8 x double> @test_v8f64_fneg_fmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) #0 {
 ; FMA-LABEL: test_v8f64_fneg_fmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfnmadd213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmadd213pd %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v8f64_fneg_fmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfnmaddpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmaddpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64_fneg_fmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmadd213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %mul = fmul <8 x double> %a0, %a1
@@ -986,19 +986,19 @@ define <8 x double> @test_v8f64_fneg_fms
 
 define <16 x float> @test_v16f32_fneg_fnmadd(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) #0 {
 ; FMA-LABEL: test_v16f32_fneg_fnmadd:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmsub213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfmsub213ps %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v16f32_fneg_fnmadd:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmsubps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmsubps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32_fneg_fnmadd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %mul = fmul <16 x float> %a0, %a1
@@ -1010,19 +1010,19 @@ define <16 x float> @test_v16f32_fneg_fn
 
 define <8 x double> @test_v8f64_fneg_fnmsub(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) #0 {
 ; FMA-LABEL: test_v8f64_fneg_fnmsub:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfmadd213pd %ymm5, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v8f64_fneg_fnmsub:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64_fneg_fnmsub:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %mul = fmul <8 x double> %a0, %a1
@@ -1038,19 +1038,19 @@ define <8 x double> @test_v8f64_fneg_fnm
 
 define <16 x float> @test_v16f32_fma_x_c1_fmul_x_c2(<16 x float> %x) #0 {
 ; FMA-LABEL: test_v16f32_fma_x_c1_fmul_x_c2:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
 ; FMA-NEXT:    vmulps {{.*}}(%rip), %ymm1, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v16f32_fma_x_c1_fmul_x_c2:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
 ; FMA4-NEXT:    vmulps {{.*}}(%rip), %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32_fma_x_c1_fmul_x_c2:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmulps {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %m0 = fmul <16 x float> %x, <float 17.0, float 16.0, float 15.0, float 14.0, float 13.0, float 12.0, float 11.0, float 10.0, float 9.0, float 8.0, float 7.0, float 6.0, float 5.0, float 4.0, float 3.0, float 2.0>
@@ -1065,19 +1065,19 @@ define <16 x float> @test_v16f32_fma_x_c
 
 define <16 x float> @test_v16f32_fma_fmul_x_c1_c2_y(<16 x float> %x, <16 x float> %y) #0 {
 ; FMA-LABEL: test_v16f32_fma_fmul_x_c1_c2_y:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vfmadd132ps {{.*}}(%rip), %ymm2, %ymm0
 ; FMA-NEXT:    vfmadd132ps {{.*}}(%rip), %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v16f32_fma_fmul_x_c1_c2_y:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vfmaddps %ymm2, {{.*}}(%rip), %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddps %ymm3, {{.*}}(%rip), %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32_fma_fmul_x_c1_c2_y:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd132ps {{.*}}(%rip), %zmm1, %zmm0
 ; AVX512-NEXT:    retq
   %m0 = fmul <16 x float> %x,  <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>
@@ -1090,21 +1090,21 @@ define <16 x float> @test_v16f32_fma_fmu
 
 define <16 x float> @test_v16f32_fneg_fmul(<16 x float> %x, <16 x float> %y) #0 {
 ; FMA-LABEL: test_v16f32_fneg_fmul:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; FMA-NEXT:    vfnmsub213ps %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmsub213ps %ymm4, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v16f32_fneg_fmul:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vxorps %xmm4, %xmm4, %xmm4
 ; FMA4-NEXT:    vfnmsubps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmsubps %ymm4, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32_fneg_fmul:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213ps %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
@@ -1115,21 +1115,21 @@ define <16 x float> @test_v16f32_fneg_fm
 
 define <8 x double> @test_v8f64_fneg_fmul(<8 x double> %x, <8 x double> %y) #0 {
 ; FMA-LABEL: test_v8f64_fneg_fmul:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
 ; FMA-NEXT:    vfnmsub213pd %ymm4, %ymm2, %ymm0
 ; FMA-NEXT:    vfnmsub213pd %ymm4, %ymm3, %ymm1
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v8f64_fneg_fmul:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
 ; FMA4-NEXT:    vfnmsubpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfnmsubpd %ymm4, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64_fneg_fmul:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213pd %zmm2, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
@@ -1140,7 +1140,7 @@ define <8 x double> @test_v8f64_fneg_fmu
 
 define <8 x double> @test_v8f64_fneg_fmul_no_nsz(<8 x double> %x, <8 x double> %y) #0 {
 ; FMA-LABEL: test_v8f64_fneg_fmul_no_nsz:
-; FMA:       # BB#0:
+; FMA:       # %bb.0:
 ; FMA-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
 ; FMA-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
 ; FMA-NEXT:    vmovapd {{.*#+}} ymm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
@@ -1149,7 +1149,7 @@ define <8 x double> @test_v8f64_fneg_fmu
 ; FMA-NEXT:    retq
 ;
 ; FMA4-LABEL: test_v8f64_fneg_fmul_no_nsz:
-; FMA4:       # BB#0:
+; FMA4:       # %bb.0:
 ; FMA4-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vmovapd {{.*#+}} ymm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
@@ -1158,7 +1158,7 @@ define <8 x double> @test_v8f64_fneg_fmu
 ; FMA4-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64_fneg_fmul_no_nsz:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmulpd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vxorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; AVX512-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll Mon Dec  4 09:18:51 2017
@@ -7,12 +7,12 @@
 
 define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B,  <2 x double> %C) #0 {
 ; FMA3-LABEL: mul_addsub_pd128:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_addsub_pd128:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vfmaddsubpd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 entry:
@@ -25,12 +25,12 @@ entry:
 
 define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
 ; FMA3-LABEL: mul_addsub_ps128:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_addsub_ps128:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vfmaddsubps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 entry:
@@ -43,12 +43,12 @@ entry:
 
 define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
 ; FMA3-LABEL: mul_addsub_pd256:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_addsub_pd256:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vfmaddsubpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 entry:
@@ -61,12 +61,12 @@ entry:
 
 define <8 x float> @mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
 ; FMA3-LABEL: mul_addsub_ps256:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_addsub_ps256:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vfmaddsubps %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 entry:
@@ -79,18 +79,18 @@ entry:
 
 define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
 ; FMA3_256-LABEL: mul_addsub_pd512:
-; FMA3_256:       # BB#0: # %entry
+; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vfmaddsub213pd %ymm4, %ymm2, %ymm0
 ; FMA3_256-NEXT:    vfmaddsub213pd %ymm5, %ymm3, %ymm1
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_addsub_pd512:
-; FMA3_512:       # BB#0: # %entry
+; FMA3_512:       # %bb.0: # %entry
 ; FMA3_512-NEXT:    vfmaddsub213pd %zmm2, %zmm1, %zmm0
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_addsub_pd512:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vfmaddsubpd %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddsubpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
@@ -104,18 +104,18 @@ entry:
 
 define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
 ; FMA3_256-LABEL: mul_addsub_ps512:
-; FMA3_256:       # BB#0: # %entry
+; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vfmaddsub213ps %ymm4, %ymm2, %ymm0
 ; FMA3_256-NEXT:    vfmaddsub213ps %ymm5, %ymm3, %ymm1
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_addsub_ps512:
-; FMA3_512:       # BB#0: # %entry
+; FMA3_512:       # %bb.0: # %entry
 ; FMA3_512-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_addsub_ps512:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vfmaddsubps %ymm4, %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vfmaddsubps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fmf-flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmf-flags.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmf-flags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmf-flags.ll Mon Dec  4 09:18:51 2017
@@ -6,14 +6,14 @@ declare float @llvm.sqrt.f32(float %x);
 
 define float @fast_recip_sqrt(float %x) {
 ; X64-LABEL: fast_recip_sqrt:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtss %xmm0, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    divss %xmm1, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: fast_recip_sqrt:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    flds {{[0-9]+}}(%esp)
 ; X86-NEXT:    fsqrt
 ; X86-NEXT:    fld1
@@ -28,7 +28,7 @@ declare float @llvm.fmuladd.f32(float %a
 
 define float @fast_fmuladd_opts(float %a , float %b , float %c) {
 ; X64-LABEL: fast_fmuladd_opts:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, %xmm1
 ; X64-NEXT:    addss %xmm1, %xmm1
 ; X64-NEXT:    addss %xmm0, %xmm1
@@ -36,7 +36,7 @@ define float @fast_fmuladd_opts(float %a
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: fast_fmuladd_opts:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    flds {{[0-9]+}}(%esp)
 ; X86-NEXT:    fld %st(0)
 ; X86-NEXT:    fadd %st(1)
@@ -52,7 +52,7 @@ define float @fast_fmuladd_opts(float %a
 
 define double @not_so_fast_mul_add(double %x) {
 ; X64-LABEL: not_so_fast_mul_add:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; X64-NEXT:    mulsd %xmm0, %xmm1
 ; X64-NEXT:    addsd %xmm1, %xmm0
@@ -60,7 +60,7 @@ define double @not_so_fast_mul_add(doubl
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: not_so_fast_mul_add:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    fldl {{[0-9]+}}(%esp)
 ; X86-NEXT:    fld %st(0)
 ; X86-NEXT:    fmull {{\.LCPI.*}}
@@ -79,7 +79,7 @@ define double @not_so_fast_mul_add(doubl
 
 define float @not_so_fast_recip_sqrt(float %x) {
 ; X64-LABEL: not_so_fast_recip_sqrt:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtss %xmm0, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    divss %xmm1, %xmm0
@@ -87,7 +87,7 @@ define float @not_so_fast_recip_sqrt(flo
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: not_so_fast_recip_sqrt:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    flds {{[0-9]+}}(%esp)
 ; X86-NEXT:    fsqrt
 ; X86-NEXT:    fld1

Modified: llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
 ; FMA3_256-LABEL: mul_subadd_pd128:
-; FMA3_256:       # BB#0: # %entry
+; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
 ; FMA3_256-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
 ; FMA3_256-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
@@ -15,7 +15,7 @@ define <2 x double> @mul_subadd_pd128(<2
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_subadd_pd128:
-; FMA3_512:       # BB#0: # %entry
+; FMA3_512:       # %bb.0: # %entry
 ; FMA3_512-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
 ; FMA3_512-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
 ; FMA3_512-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
@@ -23,7 +23,7 @@ define <2 x double> @mul_subadd_pd128(<2
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_pd128:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
 ; FMA4-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
@@ -39,7 +39,7 @@ entry:
 
 define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
 ; FMA3-LABEL: mul_subadd_ps128:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA3-NEXT:    vsubps %xmm2, %xmm0, %xmm1
 ; FMA3-NEXT:    vaddps %xmm2, %xmm0, %xmm0
@@ -47,7 +47,7 @@ define <4 x float> @mul_subadd_ps128(<4
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_ps128:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    vsubps %xmm2, %xmm0, %xmm1
 ; FMA4-NEXT:    vaddps %xmm2, %xmm0, %xmm0
@@ -63,7 +63,7 @@ entry:
 
 define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
 ; FMA3-LABEL: mul_subadd_pd256:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; FMA3-NEXT:    vsubpd %ymm2, %ymm0, %ymm1
 ; FMA3-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
@@ -71,7 +71,7 @@ define <4 x double> @mul_subadd_pd256(<4
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_pd256:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    vsubpd %ymm2, %ymm0, %ymm1
 ; FMA4-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
@@ -87,7 +87,7 @@ entry:
 
 define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
 ; FMA3-LABEL: mul_subadd_ps256:
-; FMA3:       # BB#0: # %entry
+; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; FMA3-NEXT:    vsubps %ymm2, %ymm0, %ymm1
 ; FMA3-NEXT:    vaddps %ymm2, %ymm0, %ymm0
@@ -95,7 +95,7 @@ define <8 x float> @mul_subadd_ps256(<8
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_ps256:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    vsubps %ymm2, %ymm0, %ymm1
 ; FMA4-NEXT:    vaddps %ymm2, %ymm0, %ymm0
@@ -111,7 +111,7 @@ entry:
 
 define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
 ; FMA3_256-LABEL: mul_subadd_pd512:
-; FMA3_256:       # BB#0: # %entry
+; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
 ; FMA3_256-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
 ; FMA3_256-NEXT:    vsubpd %ymm5, %ymm1, %ymm2
@@ -123,7 +123,7 @@ define <8 x double> @mul_subadd_pd512(<8
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_subadd_pd512:
-; FMA3_512:       # BB#0: # %entry
+; FMA3_512:       # %bb.0: # %entry
 ; FMA3_512-NEXT:    vmulpd %zmm1, %zmm0, %zmm0
 ; FMA3_512-NEXT:    vsubpd %zmm2, %zmm0, %zmm1
 ; FMA3_512-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
@@ -131,7 +131,7 @@ define <8 x double> @mul_subadd_pd512(<8
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_pd512:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    vsubpd %ymm5, %ymm1, %ymm2
@@ -151,7 +151,7 @@ entry:
 
 define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
 ; FMA3_256-LABEL: mul_subadd_ps512:
-; FMA3_256:       # BB#0: # %entry
+; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vmulps %ymm2, %ymm0, %ymm0
 ; FMA3_256-NEXT:    vmulps %ymm3, %ymm1, %ymm1
 ; FMA3_256-NEXT:    vsubps %ymm5, %ymm1, %ymm2
@@ -163,7 +163,7 @@ define <16 x float> @mul_subadd_ps512(<1
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_subadd_ps512:
-; FMA3_512:       # BB#0: # %entry
+; FMA3_512:       # %bb.0: # %entry
 ; FMA3_512-NEXT:    vmulps %zmm1, %zmm0, %zmm1
 ; FMA3_512-NEXT:    vaddps %zmm2, %zmm1, %zmm0
 ; FMA3_512-NEXT:    movw $-21846, %ax # imm = 0xAAAA
@@ -172,7 +172,7 @@ define <16 x float> @mul_subadd_ps512(<1
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_ps512:
-; FMA4:       # BB#0: # %entry
+; FMA4:       # %bb.0: # %entry
 ; FMA4-NEXT:    vmulps %ymm2, %ymm0, %ymm0
 ; FMA4-NEXT:    vmulps %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    vsubps %ymm5, %ymm1, %ymm2

Modified: llvm/trunk/test/CodeGen/X86/fold-load-binops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-binops.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-binops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-binops.ll Mon Dec  4 09:18:51 2017
@@ -9,12 +9,12 @@
 
 define <4 x float> @addss(<4 x float> %va, float* %pb) {
 ; SSE-LABEL: addss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: addss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
@@ -26,12 +26,12 @@ define <4 x float> @addss(<4 x float> %v
 
 define <2 x double> @addsd(<2 x double> %va, double* %pb) {
 ; SSE-LABEL: addsd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: addsd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
@@ -43,12 +43,12 @@ define <2 x double> @addsd(<2 x double>
 
 define <4 x float> @subss(<4 x float> %va, float* %pb) {
 ; SSE-LABEL: subss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: subss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
@@ -60,12 +60,12 @@ define <4 x float> @subss(<4 x float> %v
 
 define <2 x double> @subsd(<2 x double> %va, double* %pb) {
 ; SSE-LABEL: subsd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: subsd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
@@ -77,12 +77,12 @@ define <2 x double> @subsd(<2 x double>
 
 define <4 x float> @mulss(<4 x float> %va, float* %pb) {
 ; SSE-LABEL: mulss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: mulss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
@@ -94,12 +94,12 @@ define <4 x float> @mulss(<4 x float> %v
 
 define <2 x double> @mulsd(<2 x double> %va, double* %pb) {
 ; SSE-LABEL: mulsd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: mulsd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
@@ -111,12 +111,12 @@ define <2 x double> @mulsd(<2 x double>
 
 define <4 x float> @divss(<4 x float> %va, float* %pb) {
 ; SSE-LABEL: divss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: divss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
@@ -128,12 +128,12 @@ define <4 x float> @divss(<4 x float> %v
 
 define <2 x double> @divsd(<2 x double> %va, double* %pb) {
 ; SSE-LABEL: divsd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: divsd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0

Modified: llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-unops.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-unops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-unops.ll Mon Dec  4 09:18:51 2017
@@ -7,13 +7,13 @@
 
 define float @rcpss(float* %a) {
 ; SSE-LABEL: rcpss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    rcpss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rcpss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -26,13 +26,13 @@ define float @rcpss(float* %a) {
 
 define float @rsqrtss(float* %a) {
 ; SSE-LABEL: rsqrtss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    rsqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rsqrtss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -45,13 +45,13 @@ define float @rsqrtss(float* %a) {
 
 define float @sqrtss(float* %a) {
 ; SSE-LABEL: sqrtss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -64,13 +64,13 @@ define float @sqrtss(float* %a) {
 
 define double @sqrtsd(double* %a) {
 ; SSE-LABEL: sqrtsd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtsd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -83,12 +83,12 @@ define double @sqrtsd(double* %a) {
 
 define float @rcpss_size(float* %a) optsize {
 ; SSE-LABEL: rcpss_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rcpss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rcpss_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrcpss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load float, float* %a
@@ -100,12 +100,12 @@ define float @rcpss_size(float* %a) opts
 
 define <4 x float> @rcpss_full_size(<4 x float>* %a) optsize {
 ; SSE-LABEL: rcpss_full_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rcpss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rcpss_full_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrcpss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load <4 x float>, <4 x float>* %a
@@ -115,12 +115,12 @@ define <4 x float> @rcpss_full_size(<4 x
 
 define float @rsqrtss_size(float* %a) optsize {
 ; SSE-LABEL: rsqrtss_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rsqrtss_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load float, float* %a
@@ -132,12 +132,12 @@ define float @rsqrtss_size(float* %a) op
 
 define <4 x float> @rsqrtss_full_size(<4 x float>* %a) optsize {
 ; SSE-LABEL: rsqrtss_full_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rsqrtss_full_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load <4 x float>, <4 x float>* %a
@@ -147,12 +147,12 @@ define <4 x float> @rsqrtss_full_size(<4
 
 define float @sqrtss_size(float* %a) optsize{
 ; SSE-LABEL: sqrtss_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtss_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load float, float* %a
@@ -164,12 +164,12 @@ define float @sqrtss_size(float* %a) opt
 
 define <4 x float> @sqrtss_full_size(<4 x float>* %a) optsize{
 ; SSE-LABEL: sqrtss_full_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtss_full_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtss (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load <4 x float>, <4 x float>* %a
@@ -179,12 +179,12 @@ define <4 x float> @sqrtss_full_size(<4
 
 define double @sqrtsd_size(double* %a) optsize {
 ; SSE-LABEL: sqrtsd_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtsd_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtsd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load double, double* %a
@@ -196,12 +196,12 @@ define double @sqrtsd_size(double* %a) o
 
 define <2 x double> @sqrtsd_full_size(<2 x double>* %a) optsize {
 ; SSE-LABEL: sqrtsd_full_size:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtsd_full_size:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtsd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load <2 x double>, <2 x double>* %a
