[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 4 09:18:56 PST 2017
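
The change below switches the basic-block comments printed in assembly and
debug output from the old "# BB#N:" form to "# %bb.N:", matching the %bb.N
syntax MIR already uses for block references, so the test updates are purely
mechanical rewrites of the CHECK lines. For out-of-tree tests that still
expect the old form, a minimal migration sketch in Python (a hypothetical
helper, not part of this commit):

    import re
    import sys

    def update_bb_refs(path):
        """Rewrite old-style "# BB#N" block comments to "# %bb.N"."""
        with open(path) as f:
            text = f.read()
        # Match the old basic-block comment format, e.g. "# BB#0:" -> "# %bb.0:".
        new_text = re.sub(r'# BB#(\d+)', r'# %bb.\1', text)
        with open(path, 'w') as f:
            f.write(new_text)

    if __name__ == '__main__':
        for p in sys.argv[1:]:
            update_bb_refs(p)

Usage: python update_bb_refs.py test/CodeGen/X86/*.ll (the shell expands the
glob; each file is rewritten in place).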


Modified: llvm/trunk/test/CodeGen/X86/sad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sad.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sad.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sad.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define i32 @sad_16i8() nounwind {
 ; SSE2-LABEL: sad_16i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
@@ -22,7 +22,7 @@ define i32 @sad_16i8() nounwind {
 ; SSE2-NEXT:    paddd %xmm3, %xmm1
 ; SSE2-NEXT:    addq $4, %rax
 ; SSE2-NEXT:    jne .LBB0_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    paddd %xmm0, %xmm1
 ; SSE2-NEXT:    paddd %xmm0, %xmm0
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
@@ -34,7 +34,7 @@ define i32 @sad_16i8() nounwind {
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_16i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -46,7 +46,7 @@ define i32 @sad_16i8() nounwind {
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    addq $4, %rax
 ; AVX2-NEXT:    jne .LBB0_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
@@ -58,7 +58,7 @@ define i32 @sad_16i8() nounwind {
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_16i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    .p2align 4, 0x90
@@ -69,7 +69,7 @@ define i32 @sad_16i8() nounwind {
 ; AVX512F-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    addq $4, %rax
 ; AVX512F-NEXT:    jne .LBB0_1
-; AVX512F-NEXT:  # BB#2: # %middle.block
+; AVX512F-NEXT:  # %bb.2: # %middle.block
 ; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -83,7 +83,7 @@ define i32 @sad_16i8() nounwind {
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_16i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    .p2align 4, 0x90
@@ -94,7 +94,7 @@ define i32 @sad_16i8() nounwind {
 ; AVX512BW-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    addq $4, %rax
 ; AVX512BW-NEXT:    jne .LBB0_1
-; AVX512BW-NEXT:  # BB#2: # %middle.block
+; AVX512BW-NEXT:  # %bb.2: # %middle.block
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -145,7 +145,7 @@ middle.block:
 
 define i32 @sad_32i8() nounwind {
 ; SSE2-LABEL: sad_32i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm12, %xmm12
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE2-NEXT:    pxor %xmm13, %xmm13
@@ -261,7 +261,7 @@ define i32 @sad_32i8() nounwind {
 ; SSE2-NEXT:    paddd %xmm8, %xmm0
 ; SSE2-NEXT:    addq $4, %rax
 ; SSE2-NEXT:    jne .LBB1_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    paddd %xmm15, %xmm6
 ; SSE2-NEXT:    paddd %xmm0, %xmm3
 ; SSE2-NEXT:    paddd %xmm6, %xmm3
@@ -277,7 +277,7 @@ define i32 @sad_32i8() nounwind {
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_32i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -289,7 +289,7 @@ define i32 @sad_32i8() nounwind {
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    addq $4, %rax
 ; AVX2-NEXT:    jne .LBB1_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
@@ -303,7 +303,7 @@ define i32 @sad_32i8() nounwind {
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_32i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -315,7 +315,7 @@ define i32 @sad_32i8() nounwind {
 ; AVX512F-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
 ; AVX512F-NEXT:    addq $4, %rax
 ; AVX512F-NEXT:    jne .LBB1_1
-; AVX512F-NEXT:  # BB#2: # %middle.block
+; AVX512F-NEXT:  # %bb.2: # %middle.block
 ; AVX512F-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
@@ -330,7 +330,7 @@ define i32 @sad_32i8() nounwind {
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_32i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -342,7 +342,7 @@ define i32 @sad_32i8() nounwind {
 ; AVX512BW-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
 ; AVX512BW-NEXT:    addq $4, %rax
 ; AVX512BW-NEXT:    jne .LBB1_1
-; AVX512BW-NEXT:  # BB#2: # %middle.block
+; AVX512BW-NEXT:  # %bb.2: # %middle.block
 ; AVX512BW-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
@@ -396,7 +396,7 @@ middle.block:
 
 define i32 @sad_avx64i8() nounwind {
 ; SSE2-LABEL: sad_avx64i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    subq $200, %rsp
 ; SSE2-NEXT:    pxor %xmm14, %xmm14
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
@@ -653,7 +653,7 @@ define i32 @sad_avx64i8() nounwind {
 ; SSE2-NEXT:    paddd %xmm7, %xmm0
 ; SSE2-NEXT:    addq $4, %rax
 ; SSE2-NEXT:    jne .LBB2_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
 ; SSE2-NEXT:    paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
 ; SSE2-NEXT:    paddd %xmm3, %xmm8
@@ -678,7 +678,7 @@ define i32 @sad_avx64i8() nounwind {
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_avx64i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
@@ -736,7 +736,7 @@ define i32 @sad_avx64i8() nounwind {
 ; AVX2-NEXT:    vpaddd %ymm4, %ymm8, %ymm4
 ; AVX2-NEXT:    addq $4, %rax
 ; AVX2-NEXT:    jne .LBB2_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vpaddd %ymm6, %ymm2, %ymm2
 ; AVX2-NEXT:    vpaddd %ymm7, %ymm4, %ymm4
 ; AVX2-NEXT:    vpaddd %ymm4, %ymm2, %ymm2
@@ -754,7 +754,7 @@ define i32 @sad_avx64i8() nounwind {
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_avx64i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -785,7 +785,7 @@ define i32 @sad_avx64i8() nounwind {
 ; AVX512F-NEXT:    vpaddd %zmm3, %zmm4, %zmm3
 ; AVX512F-NEXT:    addq $4, %rax
 ; AVX512F-NEXT:    jne .LBB2_1
-; AVX512F-NEXT:  # BB#2: # %middle.block
+; AVX512F-NEXT:  # %bb.2: # %middle.block
 ; AVX512F-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpaddd %zmm3, %zmm1, %zmm1
 ; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
@@ -802,7 +802,7 @@ define i32 @sad_avx64i8() nounwind {
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_avx64i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -814,7 +814,7 @@ define i32 @sad_avx64i8() nounwind {
 ; AVX512BW-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
 ; AVX512BW-NEXT:    addq $4, %rax
 ; AVX512BW-NEXT:    jne .LBB2_1
-; AVX512BW-NEXT:  # BB#2: # %middle.block
+; AVX512BW-NEXT:  # %bb.2: # %middle.block
 ; AVX512BW-NEXT:    vpaddd %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
@@ -872,7 +872,7 @@ middle.block:
 
 define i32 @sad_2i8() nounwind {
 ; SSE2-LABEL: sad_2i8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE2-NEXT:    movl $65535, %ecx # imm = 0xFFFF
@@ -888,14 +888,14 @@ define i32 @sad_2i8() nounwind {
 ; SSE2-NEXT:    paddq %xmm2, %xmm0
 ; SSE2-NEXT:    addq $4, %rax
 ; SSE2-NEXT:    jne .LBB3_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    paddq %xmm0, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_2i8:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -910,14 +910,14 @@ define i32 @sad_2i8() nounwind {
 ; AVX2-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    addq $4, %rax
 ; AVX2-NEXT:    jne .LBB3_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; AVX2-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_2i8:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -932,14 +932,14 @@ define i32 @sad_2i8() nounwind {
 ; AVX512F-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
 ; AVX512F-NEXT:    addq $4, %rax
 ; AVX512F-NEXT:    jne .LBB3_1
-; AVX512F-NEXT:  # BB#2: # %middle.block
+; AVX512F-NEXT:  # %bb.2: # %middle.block
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; AVX512F-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    vmovd %xmm0, %eax
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_2i8:
-; AVX512BW:       # BB#0: # %entry
+; AVX512BW:       # %bb.0: # %entry
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -954,7 +954,7 @@ define i32 @sad_2i8() nounwind {
 ; AVX512BW-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
 ; AVX512BW-NEXT:    addq $4, %rax
 ; AVX512BW-NEXT:    jne .LBB3_1
-; AVX512BW-NEXT:  # BB#2: # %middle.block
+; AVX512BW-NEXT:  # %bb.2: # %middle.block
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; AVX512BW-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
@@ -992,7 +992,7 @@ middle.block:
 
 define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_4i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -1000,7 +1000,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* no
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_nonloop_4i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -1008,7 +1008,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* no
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_nonloop_4i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512F-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -1016,7 +1016,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* no
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_nonloop_4i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512BW-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX512BW-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -1040,7 +1040,7 @@ define i32 @sad_nonloop_4i8(<4 x i8>* no
 
 define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_8i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -1048,7 +1048,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* no
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_nonloop_8i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -1056,7 +1056,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* no
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_nonloop_8i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -1064,7 +1064,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* no
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_nonloop_8i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512BW-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -1090,7 +1090,7 @@ define i32 @sad_nonloop_8i8(<8 x i8>* no
 
 define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_16i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; SSE2-NEXT:    movdqu (%rdx), %xmm1
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -1100,7 +1100,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>*
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_nonloop_16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVX2-NEXT:    vpsadbw (%rdx), %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1109,7 +1109,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>*
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_nonloop_16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVX512F-NEXT:    vpsadbw (%rdx), %xmm0, %xmm0
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1118,7 +1118,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>*
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_nonloop_16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqu (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpsadbw (%rdx), %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1147,7 +1147,7 @@ define i32 @sad_nonloop_16i8(<16 x i8>*
 
 define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_32i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; SSE2-NEXT:    movdqu 16(%rdi), %xmm12
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
@@ -1244,7 +1244,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>*
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad_nonloop_32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX2-NEXT:    vpsadbw (%rdx), %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1256,7 +1256,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>*
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad_nonloop_32i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512F-NEXT:    vpsadbw (%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1268,7 +1268,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>*
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sad_nonloop_32i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpsadbw (%rdx), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/sad_variations.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sad_variations.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sad_variations.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sad_variations.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define i32 @sad8_32bit_icmp_sge(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad8_32bit_icmp_sge:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -13,7 +13,7 @@ define i32 @sad8_32bit_icmp_sge(i8* noca
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_32bit_icmp_sge:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -21,7 +21,7 @@ define i32 @sad8_32bit_icmp_sge(i8* noca
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_32bit_icmp_sge:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -55,7 +55,7 @@ for.body:
 
 define i32 @sad8_32bit_icmp_sgt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #1 {
 ; SSE2-LABEL: sad8_32bit_icmp_sgt:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -63,7 +63,7 @@ define i32 @sad8_32bit_icmp_sgt(i8* noca
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_32bit_icmp_sgt:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -71,7 +71,7 @@ define i32 @sad8_32bit_icmp_sgt(i8* noca
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_32bit_icmp_sgt:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -104,7 +104,7 @@ for.body:
 
 define i32 @sad8_32bit_icmp_sle(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #2 {
 ; SSE2-LABEL: sad8_32bit_icmp_sle:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -112,7 +112,7 @@ define i32 @sad8_32bit_icmp_sle(i8* noca
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_32bit_icmp_sle:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -120,7 +120,7 @@ define i32 @sad8_32bit_icmp_sle(i8* noca
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_32bit_icmp_sle:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -153,7 +153,7 @@ for.body:
 
 define i32 @sad8_32bit_icmp_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #3 {
 ; SSE2-LABEL: sad8_32bit_icmp_slt:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -161,7 +161,7 @@ define i32 @sad8_32bit_icmp_slt(i8* noca
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_32bit_icmp_slt:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -169,7 +169,7 @@ define i32 @sad8_32bit_icmp_slt(i8* noca
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_32bit_icmp_slt:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -202,7 +202,7 @@ for.body:
 
 define i64 @sad8_64bit_icmp_sext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
 ; SSE2-LABEL: sad8_64bit_icmp_sext_slt:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -210,7 +210,7 @@ define i64 @sad8_64bit_icmp_sext_slt(i8*
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_64bit_icmp_sext_slt:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -218,7 +218,7 @@ define i64 @sad8_64bit_icmp_sext_slt(i8*
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_64bit_icmp_sext_slt:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -251,7 +251,7 @@ for.body:
 
 define i64 @sad8_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
 ; SSE2-LABEL: sad8_64bit_icmp_zext_slt:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -259,7 +259,7 @@ define i64 @sad8_64bit_icmp_zext_slt(i8*
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_64bit_icmp_zext_slt:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -267,7 +267,7 @@ define i64 @sad8_64bit_icmp_zext_slt(i8*
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_64bit_icmp_zext_slt:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -300,7 +300,7 @@ for.body:
 
 define i64 @sad8_early_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
 ; SSE2-LABEL: sad8_early_64bit_icmp_zext_slt:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    psadbw %xmm0, %xmm1
@@ -308,7 +308,7 @@ define i64 @sad8_early_64bit_icmp_zext_s
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: sad8_early_64bit_icmp_zext_slt:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
@@ -316,7 +316,7 @@ define i64 @sad8_early_64bit_icmp_zext_s
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: sad8_early_64bit_icmp_zext_slt:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/sandybridge-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sandybridge-loads.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sandybridge-loads.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sandybridge-loads.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: wideloads:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %xmm0
 ; CHECK-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    vmovaps (%rsi), %ymm1
@@ -28,7 +28,7 @@ define void @wideloads(<8 x float>* %a,
 
 define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: widestores:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0
 ; CHECK-NEXT:    vmovaps (%rsi), %ymm1
 ; CHECK-NEXT:    vmovaps %ymm0, (%rsi)

Modified: llvm/trunk/test/CodeGen/X86/sar_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sar_fold.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sar_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sar_fold.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 
 define i32 @shl16sar15(i32 %a) #0 {
 ; CHECK-LABEL: shl16sar15:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movswl {{[0-9]+}}(%esp), %eax
   %1 = shl i32 %a, 16
   %2 = ashr exact i32 %1, 15
@@ -11,7 +11,7 @@ define i32 @shl16sar15(i32 %a) #0 {
 
 define i32 @shl16sar17(i32 %a) #0 {
 ; CHECK-LABEL: shl16sar17:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movswl {{[0-9]+}}(%esp), %eax
   %1 = shl i32 %a, 16
   %2 = ashr exact i32 %1, 17
@@ -20,7 +20,7 @@ define i32 @shl16sar17(i32 %a) #0 {
 
 define i32 @shl24sar23(i32 %a) #0 {
 ; CHECK-LABEL: shl24sar23:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
   %1 = shl i32 %a, 24
   %2 = ashr exact i32 %1, 23
@@ -29,7 +29,7 @@ define i32 @shl24sar23(i32 %a) #0 {
 
 define i32 @shl24sar25(i32 %a) #0 {
 ; CHECK-LABEL: shl24sar25:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
   %1 = shl i32 %a, 24
   %2 = ashr exact i32 %1, 25

Modified: llvm/trunk/test/CodeGen/X86/sar_fold64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sar_fold64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sar_fold64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sar_fold64.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @shl48sar47(i64 %a) #0 {
 ; CHECK-LABEL: shl48sar47:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movswq %di, %rax
 ; CHECK-NEXT:    addl %eax, %eax
 ; CHECK-NEXT:    # kill: %eax<def> %eax<kill> %rax<kill>
@@ -16,7 +16,7 @@ define i32 @shl48sar47(i64 %a) #0 {
 
 define i32 @shl48sar49(i64 %a) #0 {
 ; CHECK-LABEL: shl48sar49:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movswq %di, %rax
 ; CHECK-NEXT:    shrq %rax
 ; CHECK-NEXT:    # kill: %eax<def> %eax<kill> %rax<kill>
@@ -29,7 +29,7 @@ define i32 @shl48sar49(i64 %a) #0 {
 
 define i32 @shl56sar55(i64 %a) #0 {
 ; CHECK-LABEL: shl56sar55:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsbq %dil, %rax
 ; CHECK-NEXT:    addl %eax, %eax
 ; CHECK-NEXT:    # kill: %eax<def> %eax<kill> %rax<kill>
@@ -42,7 +42,7 @@ define i32 @shl56sar55(i64 %a) #0 {
 
 define i32 @shl56sar57(i64 %a) #0 {
 ; CHECK-LABEL: shl56sar57:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsbq %dil, %rax
 ; CHECK-NEXT:    shrq %rax
 ; CHECK-NEXT:    # kill: %eax<def> %eax<kill> %rax<kill>
@@ -55,7 +55,7 @@ define i32 @shl56sar57(i64 %a) #0 {
 
 define i8 @all_sign_bit_ashr(i8 %x) {
 ; CHECK-LABEL: all_sign_bit_ashr:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andb $1, %dil
 ; CHECK-NEXT:    negb %dil
 ; CHECK-NEXT:    movl %edi, %eax
@@ -68,7 +68,7 @@ define i8 @all_sign_bit_ashr(i8 %x) {
 
 define <4 x i32> @all_sign_bit_ashr_vec(<4 x i32> %x) {
 ; CHECK-LABEL: all_sign_bit_ashr_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    psubd %xmm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/sbb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sbb.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sbb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sbb.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i8 @i8_select_0_or_neg1(i8 %x) {
 ; CHECK-LABEL: i8_select_0_or_neg1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    negb %dil
 ; CHECK-NEXT:    sbbb %al, %al
 ; CHECK-NEXT:    retq
@@ -20,7 +20,7 @@ define i8 @i8_select_0_or_neg1(i8 %x) {
 
 define i16 @i16_select_0_or_neg1_as_math(i16 %x) {
 ; CHECK-LABEL: i16_select_0_or_neg1_as_math:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    negw %di
 ; CHECK-NEXT:    sbbw %ax, %ax
 ; CHECK-NEXT:    retq
@@ -34,7 +34,7 @@ define i16 @i16_select_0_or_neg1_as_math
 
 define i32 @i32_select_0_or_neg1_commuted(i32 %x) {
 ; CHECK-LABEL: i32_select_0_or_neg1_commuted:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    negl %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -47,7 +47,7 @@ define i32 @i32_select_0_or_neg1_commute
 
 define i64 @i64_select_0_or_neg1_commuted_as_math(i64 %x) {
 ; CHECK-LABEL: i64_select_0_or_neg1_commuted_as_math:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    negq %rdi
 ; CHECK-NEXT:    sbbq %rax, %rax
 ; CHECK-NEXT:    retq
@@ -61,7 +61,7 @@ define i64 @i64_select_0_or_neg1_commute
 
 define i64 @i64_select_neg1_or_0(i64 %x) {
 ; CHECK-LABEL: i64_select_neg1_or_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpq $1, %rdi
 ; CHECK-NEXT:    sbbq %rax, %rax
 ; CHECK-NEXT:    retq
@@ -74,7 +74,7 @@ define i64 @i64_select_neg1_or_0(i64 %x)
 
 define i32 @i32_select_neg1_or_0_as_math(i32 %x) {
 ; CHECK-LABEL: i32_select_neg1_or_0_as_math:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $1, %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -88,7 +88,7 @@ define i32 @i32_select_neg1_or_0_as_math
 
 define i16 @i16_select_neg1_or_0_commuted(i16 %x) {
 ; CHECK-LABEL: i16_select_neg1_or_0_commuted:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpw $1, %di
 ; CHECK-NEXT:    sbbw %ax, %ax
 ; CHECK-NEXT:    retq
@@ -101,7 +101,7 @@ define i16 @i16_select_neg1_or_0_commute
 
 define i8 @i8_select_neg1_or_0_commuted_as_math(i8 %x) {
 ; CHECK-LABEL: i8_select_neg1_or_0_commuted_as_math:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpb $1, %dil
 ; CHECK-NEXT:    sbbb %al, %al
 ; CHECK-NEXT:    retq
@@ -115,7 +115,7 @@ define i8 @i8_select_neg1_or_0_commuted_
 
 define i32 @ult_select_neg1_or_0(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: ult_select_neg1_or_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -129,7 +129,7 @@ define i32 @ult_select_neg1_or_0(i32 %x,
 
 define i32 @ugt_select_neg1_or_0(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: ugt_select_neg1_or_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -143,7 +143,7 @@ define i32 @ugt_select_neg1_or_0(i32 %x,
 
 define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: uge_select_0_or_neg1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -158,7 +158,7 @@ define i32 @uge_select_0_or_neg1(i32 %x,
 
 define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: ule_select_0_or_neg1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -173,7 +173,7 @@ define i32 @ule_select_0_or_neg1(i32 %x,
 
 define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: uge_select_0_or_neg1_sub:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    retq
@@ -188,7 +188,7 @@ define i32 @uge_select_0_or_neg1_sub(i32
 
 define i64 @ugt_select_neg1_or_0_sub(i64 %x, i64 %y) nounwind {
 ; CHECK-LABEL: ugt_select_neg1_or_0_sub:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpq %rdi, %rsi
 ; CHECK-NEXT:    sbbq %rax, %rax
 ; CHECK-NEXT:    retq
@@ -203,7 +203,7 @@ define i64 @ugt_select_neg1_or_0_sub(i64
 
 define i16 @ult_select_neg1_or_0_sub(i16 %x, i16 %y) nounwind {
 ; CHECK-LABEL: ult_select_neg1_or_0_sub:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpw %di, %si
 ; CHECK-NEXT:    sbbw %ax, %ax
 ; CHECK-NEXT:    retq
@@ -220,7 +220,7 @@ define i16 @ult_select_neg1_or_0_sub(i16
 
 define void @PR33560(i8 %x, i64 %y) {
 ; CHECK-LABEL: PR33560:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    negb %dil
 ; CHECK-NEXT:    sbbq %rax, %rax
 ; CHECK-NEXT:    cmpq %rsi, %rax

Modified: llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@
 
 define float @u32_to_f(i32 %a) nounwind {
 ; AVX512_32-LABEL: u32_to_f:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %eax
 ; AVX512_32-NEXT:    vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; AVX512_32-NEXT:    vmovss %xmm0, (%esp)
@@ -20,12 +20,12 @@ define float @u32_to_f(i32 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: u32_to_f:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u32_to_f:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %eax
 ; SSE2_32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2_32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -39,13 +39,13 @@ define float @u32_to_f(i32 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: u32_to_f:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    movl %edi, %eax
 ; SSE2_64-NEXT:    cvtsi2ssq %rax, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u32_to_f:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -63,7 +63,7 @@ define float @u32_to_f(i32 %a) nounwind
 
 define float @s32_to_f(i32 %a) nounwind {
 ; AVX512_32-LABEL: s32_to_f:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %eax
 ; AVX512_32-NEXT:    vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; AVX512_32-NEXT:    vmovss %xmm0, (%esp)
@@ -72,12 +72,12 @@ define float @s32_to_f(i32 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: s32_to_f:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s32_to_f:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %eax
 ; SSE2_32-NEXT:    cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
 ; SSE2_32-NEXT:    movss %xmm0, (%esp)
@@ -86,12 +86,12 @@ define float @s32_to_f(i32 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: s32_to_f:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    cvtsi2ssl %edi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s32_to_f:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %eax
 ; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X87-NEXT:    movl %eax, (%esp)
@@ -104,7 +104,7 @@ define float @s32_to_f(i32 %a) nounwind
 
 define double @u32_to_d(i32 %a) nounwind {
 ; AVX512_32-LABEL: u32_to_d:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -117,12 +117,12 @@ define double @u32_to_d(i32 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: u32_to_d:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u32_to_d:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -138,13 +138,13 @@ define double @u32_to_d(i32 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: u32_to_d:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    movl %edi, %eax
 ; SSE2_64-NEXT:    cvtsi2sdq %rax, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u32_to_d:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -162,7 +162,7 @@ define double @u32_to_d(i32 %a) nounwind
 
 define double @s32_to_d(i32 %a) nounwind {
 ; AVX512_32-LABEL: s32_to_d:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -175,12 +175,12 @@ define double @s32_to_d(i32 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: s32_to_d:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s32_to_d:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -193,12 +193,12 @@ define double @s32_to_d(i32 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: s32_to_d:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    cvtsi2sdl %edi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s32_to_d:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %eax
 ; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X87-NEXT:    movl %eax, (%esp)
@@ -211,7 +211,7 @@ define double @s32_to_d(i32 %a) nounwind
 
 define x86_fp80 @u32_to_x(i32 %a) nounwind {
 ; AVX512_32-LABEL: u32_to_x:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -227,7 +227,7 @@ define x86_fp80 @u32_to_x(i32 %a) nounwi
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: u32_to_x:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512_64-NEXT:    vmovd %edi, %xmm1
 ; AVX512_64-NEXT:    vpor %xmm0, %xmm1, %xmm1
@@ -237,7 +237,7 @@ define x86_fp80 @u32_to_x(i32 %a) nounwi
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u32_to_x:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -253,14 +253,14 @@ define x86_fp80 @u32_to_x(i32 %a) nounwi
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: u32_to_x:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    movl %edi, %eax
 ; SSE2_64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
 ; SSE2_64-NEXT:    fildll -{{[0-9]+}}(%rsp)
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u32_to_x:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -278,7 +278,7 @@ define x86_fp80 @u32_to_x(i32 %a) nounwi
 
 define x86_fp80 @s32_to_x(i32 %a) nounwind {
 ; CHECK32-LABEL: s32_to_x:
-; CHECK32:       # BB#0:
+; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    pushl %eax
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movl %eax, (%esp)
@@ -287,7 +287,7 @@ define x86_fp80 @s32_to_x(i32 %a) nounwi
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: s32_to_x:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl %edi, -{{[0-9]+}}(%rsp)
 ; CHECK64-NEXT:    fildl -{{[0-9]+}}(%rsp)
 ; CHECK64-NEXT:    retq
@@ -297,7 +297,7 @@ define x86_fp80 @s32_to_x(i32 %a) nounwi
 
 define float @u64_to_f(i64 %a) nounwind {
 ; AVX512_32-LABEL: u64_to_f:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -318,12 +318,12 @@ define float @u64_to_f(i64 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: u64_to_f:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u64_to_f:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -344,10 +344,10 @@ define float @u64_to_f(i64 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: u64_to_f:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    testq %rdi, %rdi
 ; SSE2_64-NEXT:    js .LBB6_1
-; SSE2_64-NEXT:  # BB#2:
+; SSE2_64-NEXT:  # %bb.2:
 ; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ; SSE2_64-NEXT:  .LBB6_1:
@@ -360,7 +360,7 @@ define float @u64_to_f(i64 %a) nounwind
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u64_to_f:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -385,7 +385,7 @@ define float @u64_to_f(i64 %a) nounwind
 
 define float @s64_to_f(i64 %a) nounwind {
 ; AVX512_32-LABEL: s64_to_f:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %eax
 ; AVX512_32-NEXT:    fildll {{[0-9]+}}(%esp)
 ; AVX512_32-NEXT:    fstps (%esp)
@@ -394,12 +394,12 @@ define float @s64_to_f(i64 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: s64_to_f:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s64_to_f:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %eax
 ; SSE2_32-NEXT:    fildll {{[0-9]+}}(%esp)
 ; SSE2_32-NEXT:    fstps (%esp)
@@ -408,12 +408,12 @@ define float @s64_to_f(i64 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: s64_to_f:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_f:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    fildll {{[0-9]+}}(%esp)
 ; X87-NEXT:    retl
   %r = sitofp i64 %a to float
@@ -422,7 +422,7 @@ define float @s64_to_f(i64 %a) nounwind
 
 define float @s64_to_f_2(i64 %a) nounwind {
 ; AVX512_32-LABEL: s64_to_f_2:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -442,13 +442,13 @@ define float @s64_to_f_2(i64 %a) nounwin
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: s64_to_f_2:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    addq $5, %rdi
 ; AVX512_64-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s64_to_f_2:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -469,13 +469,13 @@ define float @s64_to_f_2(i64 %a) nounwin
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: s64_to_f_2:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    addq $5, %rdi
 ; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_f_2:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -497,7 +497,7 @@ define float @s64_to_f_2(i64 %a) nounwin
 
 define double @u64_to_d(i64 %a) nounwind {
 ; AVX512_32-LABEL: u64_to_d:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -513,12 +513,12 @@ define double @u64_to_d(i64 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: u64_to_d:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u64_to_d:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -535,7 +535,7 @@ define double @u64_to_d(i64 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: u64_to_d:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    movq %rdi, %xmm1
 ; SSE2_64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
 ; SSE2_64-NEXT:    subpd {{.*}}(%rip), %xmm1
@@ -544,7 +544,7 @@ define double @u64_to_d(i64 %a) nounwind
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u64_to_d:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -569,7 +569,7 @@ define double @u64_to_d(i64 %a) nounwind
 
 define double @s64_to_d(i64 %a) nounwind {
 ; AVX512_32-LABEL: s64_to_d:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -582,12 +582,12 @@ define double @s64_to_d(i64 %a) nounwind
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: s64_to_d:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s64_to_d:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -600,12 +600,12 @@ define double @s64_to_d(i64 %a) nounwind
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: s64_to_d:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    cvtsi2sdq %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_d:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    fildll {{[0-9]+}}(%esp)
 ; X87-NEXT:    retl
   %r = sitofp i64 %a to double
@@ -614,7 +614,7 @@ define double @s64_to_d(i64 %a) nounwind
 
 define double @s64_to_d_2(i64 %a) nounwind {
 ; AVX512_32-LABEL: s64_to_d_2:
-; AVX512_32:       # BB#0:
+; AVX512_32:       # %bb.0:
 ; AVX512_32-NEXT:    pushl %ebp
 ; AVX512_32-NEXT:    movl %esp, %ebp
 ; AVX512_32-NEXT:    andl $-8, %esp
@@ -634,13 +634,13 @@ define double @s64_to_d_2(i64 %a) nounwi
 ; AVX512_32-NEXT:    retl
 ;
 ; AVX512_64-LABEL: s64_to_d_2:
-; AVX512_64:       # BB#0:
+; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    addq $5, %rdi
 ; AVX512_64-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s64_to_d_2:
-; SSE2_32:       # BB#0:
+; SSE2_32:       # %bb.0:
 ; SSE2_32-NEXT:    pushl %ebp
 ; SSE2_32-NEXT:    movl %esp, %ebp
 ; SSE2_32-NEXT:    andl $-8, %esp
@@ -661,13 +661,13 @@ define double @s64_to_d_2(i64 %a) nounwi
 ; SSE2_32-NEXT:    retl
 ;
 ; SSE2_64-LABEL: s64_to_d_2:
-; SSE2_64:       # BB#0:
+; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    addq $5, %rdi
 ; SSE2_64-NEXT:    cvtsi2sdq %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_d_2:
-; X87:       # BB#0:
+; X87:       # %bb.0:
 ; X87-NEXT:    pushl %ebp
 ; X87-NEXT:    movl %esp, %ebp
 ; X87-NEXT:    andl $-8, %esp
@@ -689,7 +689,7 @@ define double @s64_to_d_2(i64 %a) nounwi
 
 define x86_fp80 @u64_to_x(i64 %a) nounwind {
 ; CHECK32-LABEL: u64_to_x:
-; CHECK32:       # BB#0:
+; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    pushl %ebp
 ; CHECK32-NEXT:    movl %esp, %ebp
 ; CHECK32-NEXT:    andl $-8, %esp
@@ -708,7 +708,7 @@ define x86_fp80 @u64_to_x(i64 %a) nounwi
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: u64_to_x:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
 ; CHECK64-NEXT:    xorl %eax, %eax
 ; CHECK64-NEXT:    testq %rdi, %rdi
@@ -722,12 +722,12 @@ define x86_fp80 @u64_to_x(i64 %a) nounwi
 
 define x86_fp80 @s64_to_x(i64 %a) nounwind {
 ; CHECK32-LABEL: s64_to_x:
-; CHECK32:       # BB#0:
+; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    fildll {{[0-9]+}}(%esp)
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: s64_to_x:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
 ; CHECK64-NEXT:    fildll -{{[0-9]+}}(%rsp)
 ; CHECK64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/scatter-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/scatter-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/scatter-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/scatter-schedule.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gn
 
 define void @test(i64 %x272, <16 x i32*> %x335, <16 x i32> %x270) {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vpscatterqd %ymm2, (,%zmm0) {%k2}

Modified: llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll Mon Dec  4 09:18:51 2017
@@ -13,7 +13,7 @@
 
 define i8 @test_aaa(i8 %a0) optsize {
 ; GENERIC-LABEL: test_aaa:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    aaa
@@ -21,7 +21,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; GENERIC-NEXT:    retl
 ;
 ; ATOM-LABEL: test_aaa:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    aaa # sched: [13:6.50]
@@ -29,7 +29,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; ATOM-NEXT:    retl # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_aaa:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    aaa # sched: [100:1.00]
@@ -37,7 +37,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; SLM-NEXT:    retl # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aaa:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    aaa # sched: [100:0.33]
@@ -45,7 +45,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; SANDY-NEXT:    retl # sched: [5:1.00]
 ;
 ; HASWELL-LABEL: test_aaa:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    aaa # sched: [100:0.25]
@@ -53,7 +53,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; HASWELL-NEXT:    retl # sched: [5:0.50]
 ;
 ; BROADWELL-LABEL: test_aaa:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    aaa # sched: [100:0.25]
@@ -61,7 +61,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; BROADWELL-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKYLAKE-LABEL: test_aaa:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    aaa # sched: [100:0.25]
@@ -69,7 +69,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; SKYLAKE-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKX-LABEL: test_aaa:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    aaa # sched: [100:0.25]
@@ -77,7 +77,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; SKX-NEXT:    retl # sched: [6:0.50]
 ;
 ; BTVER2-LABEL: test_aaa:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    aaa # sched: [100:0.17]
@@ -85,7 +85,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 ; BTVER2-NEXT:    retl # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aaa:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    aaa # sched: [100:?]
@@ -97,7 +97,7 @@ define i8 @test_aaa(i8 %a0) optsize {
 
 define i8 @test_aad(i16 %a0) optsize {
 ; GENERIC-LABEL: test_aad:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    aad
@@ -105,7 +105,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; GENERIC-NEXT:    retl
 ;
 ; ATOM-LABEL: test_aad:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    aad # sched: [7:3.50]
@@ -113,7 +113,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; ATOM-NEXT:    retl # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_aad:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [4:1.00]
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    aad # sched: [100:1.00]
@@ -121,7 +121,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; SLM-NEXT:    retl # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aad:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    aad # sched: [100:0.33]
@@ -129,7 +129,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; SANDY-NEXT:    retl # sched: [5:1.00]
 ;
 ; HASWELL-LABEL: test_aad:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [4:0.50]
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    aad # sched: [100:0.25]
@@ -137,7 +137,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; HASWELL-NEXT:    retl # sched: [5:0.50]
 ;
 ; BROADWELL-LABEL: test_aad:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    aad # sched: [100:0.25]
@@ -145,7 +145,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; BROADWELL-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKYLAKE-LABEL: test_aad:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    aad # sched: [100:0.25]
@@ -153,7 +153,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; SKYLAKE-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKX-LABEL: test_aad:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [5:0.50]
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    aad # sched: [100:0.25]
@@ -161,7 +161,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; SKX-NEXT:    retl # sched: [6:0.50]
 ;
 ; BTVER2-LABEL: test_aad:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [4:1.00]
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    aad # sched: [100:0.17]
@@ -169,7 +169,7 @@ define i8 @test_aad(i16 %a0) optsize {
 ; BTVER2-NEXT:    retl # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aad:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # sched: [8:0.50]
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    aad # sched: [100:?]
@@ -181,7 +181,7 @@ define i8 @test_aad(i16 %a0) optsize {
 
 define i16 @test_aam(i8 %a0) optsize {
 ; GENERIC-LABEL: test_aam:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    aam
@@ -189,7 +189,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; GENERIC-NEXT:    retl
 ;
 ; ATOM-LABEL: test_aam:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    aam # sched: [21:10.50]
@@ -197,7 +197,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; ATOM-NEXT:    retl # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_aam:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    aam # sched: [100:1.00]
@@ -205,7 +205,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; SLM-NEXT:    retl # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aam:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    aam # sched: [100:0.33]
@@ -213,7 +213,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; SANDY-NEXT:    retl # sched: [5:1.00]
 ;
 ; HASWELL-LABEL: test_aam:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    aam # sched: [100:0.25]
@@ -221,7 +221,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; HASWELL-NEXT:    retl # sched: [5:0.50]
 ;
 ; BROADWELL-LABEL: test_aam:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    aam # sched: [100:0.25]
@@ -229,7 +229,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; BROADWELL-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKYLAKE-LABEL: test_aam:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    aam # sched: [100:0.25]
@@ -237,7 +237,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; SKYLAKE-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKX-LABEL: test_aam:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    aam # sched: [100:0.25]
@@ -245,7 +245,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; SKX-NEXT:    retl # sched: [6:0.50]
 ;
 ; BTVER2-LABEL: test_aam:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    aam # sched: [100:0.17]
@@ -253,7 +253,7 @@ define i16 @test_aam(i8 %a0) optsize {
 ; BTVER2-NEXT:    retl # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aam:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    aam # sched: [100:?]
@@ -265,7 +265,7 @@ define i16 @test_aam(i8 %a0) optsize {
 
 define i8 @test_aas(i8 %a0) optsize {
 ; GENERIC-LABEL: test_aas:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    aas
@@ -273,7 +273,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; GENERIC-NEXT:    retl
 ;
 ; ATOM-LABEL: test_aas:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    aas # sched: [13:6.50]
@@ -281,7 +281,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; ATOM-NEXT:    retl # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_aas:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    aas # sched: [100:1.00]
@@ -289,7 +289,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; SLM-NEXT:    retl # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aas:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    aas # sched: [100:0.33]
@@ -297,7 +297,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; SANDY-NEXT:    retl # sched: [5:1.00]
 ;
 ; HASWELL-LABEL: test_aas:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    aas # sched: [100:0.25]
@@ -305,7 +305,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; HASWELL-NEXT:    retl # sched: [5:0.50]
 ;
 ; BROADWELL-LABEL: test_aas:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    aas # sched: [100:0.25]
@@ -313,7 +313,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; BROADWELL-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKYLAKE-LABEL: test_aas:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    aas # sched: [100:0.25]
@@ -321,7 +321,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; SKYLAKE-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKX-LABEL: test_aas:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    aas # sched: [100:0.25]
@@ -329,7 +329,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; SKX-NEXT:    retl # sched: [6:0.50]
 ;
 ; BTVER2-LABEL: test_aas:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    aas # sched: [100:0.17]
@@ -337,7 +337,7 @@ define i8 @test_aas(i8 %a0) optsize {
 ; BTVER2-NEXT:    retl # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aas:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    aas # sched: [100:?]
@@ -351,7 +351,7 @@ define i8 @test_aas(i8 %a0) optsize {
 
 define i8 @test_daa(i8 %a0) optsize {
 ; GENERIC-LABEL: test_daa:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    daa
@@ -359,7 +359,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; GENERIC-NEXT:    retl
 ;
 ; ATOM-LABEL: test_daa:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    daa # sched: [18:9.00]
@@ -367,7 +367,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; ATOM-NEXT:    retl # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_daa:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    daa # sched: [100:1.00]
@@ -375,7 +375,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; SLM-NEXT:    retl # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_daa:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    daa # sched: [100:0.33]
@@ -383,7 +383,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; SANDY-NEXT:    retl # sched: [5:1.00]
 ;
 ; HASWELL-LABEL: test_daa:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    daa # sched: [100:0.25]
@@ -391,7 +391,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; HASWELL-NEXT:    retl # sched: [5:0.50]
 ;
 ; BROADWELL-LABEL: test_daa:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    daa # sched: [100:0.25]
@@ -399,7 +399,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; BROADWELL-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKYLAKE-LABEL: test_daa:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    daa # sched: [100:0.25]
@@ -407,7 +407,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; SKYLAKE-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKX-LABEL: test_daa:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    daa # sched: [100:0.25]
@@ -415,7 +415,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; SKX-NEXT:    retl # sched: [6:0.50]
 ;
 ; BTVER2-LABEL: test_daa:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    daa # sched: [100:0.17]
@@ -423,7 +423,7 @@ define i8 @test_daa(i8 %a0) optsize {
 ; BTVER2-NEXT:    retl # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_daa:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    daa # sched: [100:?]
@@ -435,7 +435,7 @@ define i8 @test_daa(i8 %a0) optsize {
 
 define i8 @test_das(i8 %a0) optsize {
 ; GENERIC-LABEL: test_das:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    das
@@ -443,7 +443,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; GENERIC-NEXT:    retl
 ;
 ; ATOM-LABEL: test_das:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:1.00]
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    das # sched: [20:10.00]
@@ -451,7 +451,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; ATOM-NEXT:    retl # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_das:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [3:1.00]
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    das # sched: [100:1.00]
@@ -459,7 +459,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; SLM-NEXT:    retl # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_das:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    das # sched: [100:0.33]
@@ -467,7 +467,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; SANDY-NEXT:    retl # sched: [5:1.00]
 ;
 ; HASWELL-LABEL: test_das:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [1:0.50]
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    das # sched: [100:0.25]
@@ -475,7 +475,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; HASWELL-NEXT:    retl # sched: [5:0.50]
 ;
 ; BROADWELL-LABEL: test_das:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    das # sched: [100:0.25]
@@ -483,7 +483,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; BROADWELL-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKYLAKE-LABEL: test_das:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    das # sched: [100:0.25]
@@ -491,7 +491,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; SKYLAKE-NEXT:    retl # sched: [6:0.50]
 ;
 ; SKX-LABEL: test_das:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:0.50]
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    das # sched: [100:0.25]
@@ -499,7 +499,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; SKX-NEXT:    retl # sched: [6:0.50]
 ;
 ; BTVER2-LABEL: test_das:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [5:1.00]
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    das # sched: [100:0.17]
@@ -507,7 +507,7 @@ define i8 @test_das(i8 %a0) optsize {
 ; BTVER2-NEXT:    retl # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_das:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movb {{[0-9]+}}(%esp), %al # sched: [8:0.50]
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    das # sched: [100:?]

Modified: llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -17,7 +17,7 @@
 
 define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
 ; GENERIC-LABEL: test_bsf16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    bsfw %di, %ax # sched: [3:1.00]
 ; GENERIC-NEXT:    bsfw (%rsi), %cx # sched: [8:1.00]
@@ -27,7 +27,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsf16:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    bsfw %di, %ax # sched: [16:8.00]
 ; ATOM-NEXT:    bsfw (%rsi), %cx # sched: [16:8.00]
@@ -37,7 +37,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsf16:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    bsfw %di, %ax # sched: [1:1.00]
 ; SLM-NEXT:    bsfw (%rsi), %cx # sched: [4:1.00]
@@ -47,7 +47,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsf16:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    bsfw %di, %ax # sched: [3:1.00]
 ; SANDY-NEXT:    bsfw (%rsi), %cx # sched: [8:1.00]
@@ -57,7 +57,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsf16:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    bsfw %di, %ax # sched: [3:1.00]
 ; HASWELL-NEXT:    bsfw (%rsi), %cx # sched: [3:1.00]
@@ -67,7 +67,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bsf16:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    bsfw %di, %ax # sched: [3:1.00]
 ; BROADWELL-NEXT:    bsfw (%rsi), %cx # sched: [8:1.00]
@@ -77,7 +77,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsf16:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    bsfw %di, %ax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    bsfw (%rsi), %cx # sched: [8:1.00]
@@ -87,7 +87,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsf16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    bsfw %di, %ax # sched: [3:1.00]
 ; SKX-NEXT:    bsfw (%rsi), %cx # sched: [8:1.00]
@@ -97,7 +97,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsf16:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    bsfw %di, %ax # sched: [1:0.50]
 ; BTVER2-NEXT:    bsfw (%rsi), %cx # sched: [4:1.00]
@@ -107,7 +107,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsf16:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    bsfw %di, %ax # sched: [3:0.25]
 ; ZNVER1-NEXT:    bsfw (%rsi), %cx # sched: [7:0.50]
@@ -123,7 +123,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1
 }
 define i32 @test_bsf32(i32 %a0, i32* %a1) optsize {
 ; GENERIC-LABEL: test_bsf32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    bsfl %edi, %eax # sched: [3:1.00]
 ; GENERIC-NEXT:    bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -132,7 +132,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsf32:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    bsfl %edi, %eax # sched: [16:8.00]
 ; ATOM-NEXT:    bsfl (%rsi), %ecx # sched: [16:8.00]
@@ -141,7 +141,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsf32:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    bsfl %edi, %eax # sched: [1:1.00]
 ; SLM-NEXT:    bsfl (%rsi), %ecx # sched: [4:1.00]
@@ -150,7 +150,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsf32:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    bsfl %edi, %eax # sched: [3:1.00]
 ; SANDY-NEXT:    bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -159,7 +159,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsf32:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    bsfl %edi, %eax # sched: [3:1.00]
 ; HASWELL-NEXT:    bsfl (%rsi), %ecx # sched: [3:1.00]
@@ -168,7 +168,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bsf32:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    bsfl %edi, %eax # sched: [3:1.00]
 ; BROADWELL-NEXT:    bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -177,7 +177,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsf32:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    bsfl %edi, %eax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -186,7 +186,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsf32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    bsfl %edi, %eax # sched: [3:1.00]
 ; SKX-NEXT:    bsfl (%rsi), %ecx # sched: [8:1.00]
@@ -195,7 +195,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsf32:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    bsfl %edi, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    bsfl (%rsi), %ecx # sched: [4:1.00]
@@ -204,7 +204,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsf32:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    bsfl %edi, %eax # sched: [3:0.25]
 ; ZNVER1-NEXT:    bsfl (%rsi), %ecx # sched: [7:0.50]
@@ -219,7 +219,7 @@ define i32 @test_bsf32(i32 %a0, i32* %a1
 }
 define i64 @test_bsf64(i64 %a0, i64* %a1) optsize {
 ; GENERIC-LABEL: test_bsf64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    bsfq %rdi, %rax # sched: [3:1.00]
 ; GENERIC-NEXT:    bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -228,7 +228,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsf64:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    bsfq %rdi, %rax # sched: [16:8.00]
 ; ATOM-NEXT:    bsfq (%rsi), %rcx # sched: [16:8.00]
@@ -237,7 +237,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsf64:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    bsfq %rdi, %rax # sched: [1:1.00]
 ; SLM-NEXT:    bsfq (%rsi), %rcx # sched: [4:1.00]
@@ -246,7 +246,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsf64:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    bsfq %rdi, %rax # sched: [3:1.00]
 ; SANDY-NEXT:    bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -255,7 +255,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsf64:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    bsfq %rdi, %rax # sched: [3:1.00]
 ; HASWELL-NEXT:    bsfq (%rsi), %rcx # sched: [3:1.00]
@@ -264,7 +264,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bsf64:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    bsfq %rdi, %rax # sched: [3:1.00]
 ; BROADWELL-NEXT:    bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -273,7 +273,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsf64:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    bsfq %rdi, %rax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -282,7 +282,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsf64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    bsfq %rdi, %rax # sched: [3:1.00]
 ; SKX-NEXT:    bsfq (%rsi), %rcx # sched: [8:1.00]
@@ -291,7 +291,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsf64:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    bsfq %rdi, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    bsfq (%rsi), %rcx # sched: [4:1.00]
@@ -300,7 +300,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsf64:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    bsfq %rdi, %rax # sched: [3:0.25]
 ; ZNVER1-NEXT:    bsfq (%rsi), %rcx # sched: [7:0.50]
@@ -316,7 +316,7 @@ define i64 @test_bsf64(i64 %a0, i64* %a1
 
 define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
 ; GENERIC-LABEL: test_bsr16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    bsrw %di, %ax # sched: [3:1.00]
 ; GENERIC-NEXT:    bsrw (%rsi), %cx # sched: [8:1.00]
@@ -326,7 +326,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsr16:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    bsrw %di, %ax # sched: [16:8.00]
 ; ATOM-NEXT:    bsrw (%rsi), %cx # sched: [16:8.00]
@@ -336,7 +336,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsr16:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    bsrw %di, %ax # sched: [1:1.00]
 ; SLM-NEXT:    bsrw (%rsi), %cx # sched: [4:1.00]
@@ -346,7 +346,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsr16:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    bsrw %di, %ax # sched: [3:1.00]
 ; SANDY-NEXT:    bsrw (%rsi), %cx # sched: [8:1.00]
@@ -356,7 +356,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsr16:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    bsrw %di, %ax # sched: [3:1.00]
 ; HASWELL-NEXT:    bsrw (%rsi), %cx # sched: [3:1.00]
@@ -366,7 +366,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bsr16:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    bsrw %di, %ax # sched: [3:1.00]
 ; BROADWELL-NEXT:    bsrw (%rsi), %cx # sched: [8:1.00]
@@ -376,7 +376,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsr16:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    bsrw %di, %ax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    bsrw (%rsi), %cx # sched: [8:1.00]
@@ -386,7 +386,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsr16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    bsrw %di, %ax # sched: [3:1.00]
 ; SKX-NEXT:    bsrw (%rsi), %cx # sched: [8:1.00]
@@ -396,7 +396,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsr16:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    bsrw %di, %ax # sched: [1:0.50]
 ; BTVER2-NEXT:    bsrw (%rsi), %cx # sched: [4:1.00]
@@ -406,7 +406,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsr16:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    bsrw %di, %ax # sched: [3:0.25]
 ; ZNVER1-NEXT:    bsrw (%rsi), %cx # sched: [7:0.50]
@@ -422,7 +422,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1
 }
 define i32 @test_bsr32(i32 %a0, i32* %a1) optsize {
 ; GENERIC-LABEL: test_bsr32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    bsrl %edi, %eax # sched: [3:1.00]
 ; GENERIC-NEXT:    bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -431,7 +431,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsr32:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    bsrl %edi, %eax # sched: [16:8.00]
 ; ATOM-NEXT:    bsrl (%rsi), %ecx # sched: [16:8.00]
@@ -440,7 +440,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsr32:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    bsrl %edi, %eax # sched: [1:1.00]
 ; SLM-NEXT:    bsrl (%rsi), %ecx # sched: [4:1.00]
@@ -449,7 +449,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsr32:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    bsrl %edi, %eax # sched: [3:1.00]
 ; SANDY-NEXT:    bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -458,7 +458,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsr32:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    bsrl %edi, %eax # sched: [3:1.00]
 ; HASWELL-NEXT:    bsrl (%rsi), %ecx # sched: [3:1.00]
@@ -467,7 +467,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bsr32:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    bsrl %edi, %eax # sched: [3:1.00]
 ; BROADWELL-NEXT:    bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -476,7 +476,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsr32:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    bsrl %edi, %eax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -485,7 +485,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsr32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    bsrl %edi, %eax # sched: [3:1.00]
 ; SKX-NEXT:    bsrl (%rsi), %ecx # sched: [8:1.00]
@@ -494,7 +494,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsr32:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    bsrl %edi, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    bsrl (%rsi), %ecx # sched: [4:1.00]
@@ -503,7 +503,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsr32:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    bsrl %edi, %eax # sched: [3:0.25]
 ; ZNVER1-NEXT:    bsrl (%rsi), %ecx # sched: [7:0.50]
@@ -518,7 +518,7 @@ define i32 @test_bsr32(i32 %a0, i32* %a1
 }
 define i64 @test_bsr64(i64 %a0, i64* %a1) optsize {
 ; GENERIC-LABEL: test_bsr64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    bsrq %rdi, %rax # sched: [3:1.00]
 ; GENERIC-NEXT:    bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -527,7 +527,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bsr64:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    bsrq %rdi, %rax # sched: [16:8.00]
 ; ATOM-NEXT:    bsrq (%rsi), %rcx # sched: [16:8.00]
@@ -536,7 +536,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bsr64:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    bsrq %rdi, %rax # sched: [1:1.00]
 ; SLM-NEXT:    bsrq (%rsi), %rcx # sched: [4:1.00]
@@ -545,7 +545,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bsr64:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    bsrq %rdi, %rax # sched: [3:1.00]
 ; SANDY-NEXT:    bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -554,7 +554,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bsr64:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    bsrq %rdi, %rax # sched: [3:1.00]
 ; HASWELL-NEXT:    bsrq (%rsi), %rcx # sched: [3:1.00]
@@ -563,7 +563,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bsr64:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    bsrq %rdi, %rax # sched: [3:1.00]
 ; BROADWELL-NEXT:    bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -572,7 +572,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bsr64:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    bsrq %rdi, %rax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -581,7 +581,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bsr64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    bsrq %rdi, %rax # sched: [3:1.00]
 ; SKX-NEXT:    bsrq (%rsi), %rcx # sched: [8:1.00]
@@ -590,7 +590,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bsr64:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    bsrq %rdi, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    bsrq (%rsi), %rcx # sched: [4:1.00]
@@ -599,7 +599,7 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bsr64:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    bsrq %rdi, %rax # sched: [3:0.25]
 ; ZNVER1-NEXT:    bsrq (%rsi), %rcx # sched: [7:0.50]
@@ -615,61 +615,61 @@ define i64 @test_bsr64(i64 %a0, i64* %a1
 
 define i32 @test_bswap32(i32 %a0) optsize {
 ; GENERIC-LABEL: test_bswap32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    bswapl %edi # sched: [2:1.00]
 ; GENERIC-NEXT:    movl %edi, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bswap32:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    bswapl %edi # sched: [1:1.00]
 ; ATOM-NEXT:    movl %edi, %eax # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bswap32:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    bswapl %edi # sched: [1:0.50]
 ; SLM-NEXT:    movl %edi, %eax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bswap32:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    bswapl %edi # sched: [2:1.00]
 ; SANDY-NEXT:    movl %edi, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bswap32:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    bswapl %edi # sched: [2:0.50]
 ; HASWELL-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bswap32:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    bswapl %edi # sched: [2:0.50]
 ; BROADWELL-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bswap32:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    bswapl %edi # sched: [2:0.50]
 ; SKYLAKE-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bswap32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    bswapl %edi # sched: [2:0.50]
 ; SKX-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bswap32:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    bswapl %edi # sched: [1:0.50]
 ; BTVER2-NEXT:    movl %edi, %eax # sched: [1:0.17]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bswap32:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    bswapl %edi # sched: [1:1.00]
 ; ZNVER1-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -678,61 +678,61 @@ define i32 @test_bswap32(i32 %a0) optsiz
 }
 define i64 @test_bswap64(i64 %a0) optsize {
 ; GENERIC-LABEL: test_bswap64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    bswapq %rdi # sched: [2:1.00]
 ; GENERIC-NEXT:    movq %rdi, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_bswap64:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    bswapq %rdi # sched: [1:1.00]
 ; ATOM-NEXT:    movq %rdi, %rax # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_bswap64:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    bswapq %rdi # sched: [1:0.50]
 ; SLM-NEXT:    movq %rdi, %rax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_bswap64:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    bswapq %rdi # sched: [2:1.00]
 ; SANDY-NEXT:    movq %rdi, %rax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_bswap64:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    bswapq %rdi # sched: [2:0.50]
 ; HASWELL-NEXT:    movq %rdi, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_bswap64:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    bswapq %rdi # sched: [2:0.50]
 ; BROADWELL-NEXT:    movq %rdi, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_bswap64:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    bswapq %rdi # sched: [2:0.50]
 ; SKYLAKE-NEXT:    movq %rdi, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_bswap64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    bswapq %rdi # sched: [2:0.50]
 ; SKX-NEXT:    movq %rdi, %rax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_bswap64:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    bswapq %rdi # sched: [1:0.50]
 ; BTVER2-NEXT:    movq %rdi, %rax # sched: [1:0.17]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bswap64:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    bswapq %rdi # sched: [1:1.00]
 ; ZNVER1-NEXT:    movq %rdi, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -749,7 +749,7 @@ define i64 @test_bswap64(i64 %a0) optsiz
 
 define void @test_cbw_cdq_cdqe_cqo_cwd_cwde() optsize {
 ; GENERIC-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    cbtw # sched: [1:0.33]
 ; GENERIC-NEXT:    cltd # sched: [1:0.50]
@@ -761,7 +761,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    cbtw # sched: [4:2.00]
 ; ATOM-NEXT:    cltd # sched: [4:2.00]
@@ -773,7 +773,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    cbtw # sched: [1:0.50]
 ; SLM-NEXT:    cltd # sched: [1:0.50]
@@ -785,7 +785,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    cbtw # sched: [1:0.33]
 ; SANDY-NEXT:    cltd # sched: [1:0.50]
@@ -797,7 +797,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    cbtw # sched: [1:0.25]
 ; HASWELL-NEXT:    cltd # sched: [1:0.50]
@@ -809,7 +809,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    cbtw # sched: [1:0.25]
 ; BROADWELL-NEXT:    cltd # sched: [1:0.50]
@@ -821,7 +821,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    cbtw # sched: [1:0.25]
 ; SKYLAKE-NEXT:    cltd # sched: [1:0.50]
@@ -833,7 +833,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    cbtw # sched: [1:0.25]
 ; SKX-NEXT:    cltd # sched: [1:0.50]
@@ -845,7 +845,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    cbtw # sched: [1:0.50]
 ; BTVER2-NEXT:    cltd # sched: [1:0.50]
@@ -857,7 +857,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cbw_cdq_cdqe_cqo_cwd_cwde:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    cbtw # sched: [1:0.25]
 ; ZNVER1-NEXT:    cltd # sched: [1:0.25]
@@ -873,7 +873,7 @@ define void @test_cbw_cdq_cdqe_cqo_cwd_c
 
 define void @test_clc_cld_cmc() optsize {
 ; GENERIC-LABEL: test_clc_cld_cmc:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    clc # sched: [1:0.33]
 ; GENERIC-NEXT:    cld # sched: [1:0.33]
@@ -882,7 +882,7 @@ define void @test_clc_cld_cmc() optsize
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_clc_cld_cmc:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    clc # sched: [1:0.50]
 ; ATOM-NEXT:    cld # sched: [3:1.50]
@@ -891,7 +891,7 @@ define void @test_clc_cld_cmc() optsize
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_clc_cld_cmc:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    clc # sched: [1:0.50]
 ; SLM-NEXT:    cld # sched: [1:0.50]
@@ -900,7 +900,7 @@ define void @test_clc_cld_cmc() optsize
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_clc_cld_cmc:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    clc # sched: [1:0.33]
 ; SANDY-NEXT:    cld # sched: [1:0.33]
@@ -909,7 +909,7 @@ define void @test_clc_cld_cmc() optsize
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_clc_cld_cmc:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    clc # sched: [1:0.25]
 ; HASWELL-NEXT:    cld # sched: [3:1.00]
@@ -918,7 +918,7 @@ define void @test_clc_cld_cmc() optsize
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_clc_cld_cmc:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    clc # sched: [1:0.25]
 ; BROADWELL-NEXT:    cld # sched: [3:1.00]
@@ -927,7 +927,7 @@ define void @test_clc_cld_cmc() optsize
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_clc_cld_cmc:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    clc # sched: [1:0.25]
 ; SKYLAKE-NEXT:    cld # sched: [3:1.00]
@@ -936,7 +936,7 @@ define void @test_clc_cld_cmc() optsize
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_clc_cld_cmc:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    clc # sched: [1:0.25]
 ; SKX-NEXT:    cld # sched: [3:1.00]
@@ -945,7 +945,7 @@ define void @test_clc_cld_cmc() optsize
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_clc_cld_cmc:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    clc # sched: [1:0.50]
 ; BTVER2-NEXT:    cld # sched: [1:0.50]
@@ -954,7 +954,7 @@ define void @test_clc_cld_cmc() optsize
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_clc_cld_cmc:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    clc # sched: [1:0.25]
 ; ZNVER1-NEXT:    cld # sched: [1:0.25]
@@ -980,70 +980,70 @@ define void @test_clc_cld_cmc() optsize
 
 define void @test_cpuid() optsize {
 ; GENERIC-LABEL: test_cpuid:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    cpuid # sched: [100:0.33]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cpuid:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    cpuid # sched: [121:60.50]
 ; ATOM-NEXT:    #NO_APP
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cpuid:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    cpuid # sched: [100:1.00]
 ; SLM-NEXT:    #NO_APP
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cpuid:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    cpuid # sched: [100:0.33]
 ; SANDY-NEXT:    #NO_APP
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cpuid:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    cpuid # sched: [18:2.00]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cpuid:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    cpuid # sched: [18:2.00]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cpuid:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    cpuid # sched: [18:2.00]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cpuid:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    cpuid # sched: [18:2.00]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cpuid:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    cpuid # sched: [100:0.17]
 ; BTVER2-NEXT:    #NO_APP
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cpuid:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    cpuid # sched: [100:?]
 ; ZNVER1-NEXT:    #NO_APP
@@ -1074,7 +1074,7 @@ define void @test_cpuid() optsize {
 
 define void @test_invlpg_invlpga(i8 *%a0) optsize {
 ; GENERIC-LABEL: test_invlpg_invlpga:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    invlpg (%rdi) # sched: [100:0.33]
 ; GENERIC-NEXT:    invlpga %ecx, %rax # sched: [100:0.33]
@@ -1082,7 +1082,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_invlpg_invlpga:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    invlpg (%rdi) # sched: [71:35.50]
 ; ATOM-NEXT:    invlpga %ecx, %rax # sched: [71:35.50]
@@ -1090,7 +1090,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_invlpg_invlpga:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    invlpg (%rdi) # sched: [100:1.00]
 ; SLM-NEXT:    invlpga %ecx, %rax # sched: [100:1.00]
@@ -1098,7 +1098,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_invlpg_invlpga:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    invlpg (%rdi) # sched: [100:0.33]
 ; SANDY-NEXT:    invlpga %ecx, %rax # sched: [100:0.33]
@@ -1106,7 +1106,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_invlpg_invlpga:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    invlpg (%rdi) # sched: [100:0.25]
 ; HASWELL-NEXT:    invlpga %ecx, %rax # sched: [100:0.25]
@@ -1114,7 +1114,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_invlpg_invlpga:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    invlpg (%rdi) # sched: [100:0.25]
 ; BROADWELL-NEXT:    invlpga %ecx, %rax # sched: [100:0.25]
@@ -1122,7 +1122,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_invlpg_invlpga:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    invlpg (%rdi) # sched: [100:0.25]
 ; SKYLAKE-NEXT:    invlpga %ecx, %rax # sched: [100:0.25]
@@ -1130,7 +1130,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_invlpg_invlpga:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    invlpg (%rdi) # sched: [100:0.25]
 ; SKX-NEXT:    invlpga %ecx, %rax # sched: [100:0.25]
@@ -1138,7 +1138,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_invlpg_invlpga:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    invlpg (%rdi) # sched: [100:0.17]
 ; BTVER2-NEXT:    invlpga %ecx, %rax # sched: [100:0.17]
@@ -1146,7 +1146,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_invlpg_invlpga:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    invlpg (%rdi) # sched: [100:?]
 ; ZNVER1-NEXT:    invlpga %ecx, %rax # sched: [100:?]
@@ -1261,7 +1261,7 @@ define void @test_invlpg_invlpga(i8 *%a0
 
 define void @test_shld_shrd_16(i16 %a0, i16 %a1, i16 *%a2) optsize {
 ; GENERIC-LABEL: test_shld_shrd_16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    shldw %cl, %si, %di # sched: [4:1.50]
 ; GENERIC-NEXT:    shrdw %cl, %si, %di # sched: [4:1.50]
@@ -1275,7 +1275,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_shld_shrd_16:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    shldw %cl, %si, %di # sched: [6:3.00]
 ; ATOM-NEXT:    shrdw %cl, %si, %di # sched: [6:3.00]
@@ -1289,7 +1289,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_shld_shrd_16:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    shldw %cl, %si, %di # sched: [1:1.00]
 ; SLM-NEXT:    shrdw %cl, %si, %di # sched: [1:1.00]
@@ -1303,7 +1303,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_shld_shrd_16:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    shldw %cl, %si, %di # sched: [4:1.50]
 ; SANDY-NEXT:    shrdw %cl, %si, %di # sched: [4:1.50]
@@ -1317,7 +1317,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_shld_shrd_16:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    shldw %cl, %si, %di # sched: [6:1.00]
 ; HASWELL-NEXT:    shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1331,7 +1331,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_shld_shrd_16:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    shldw %cl, %si, %di # sched: [6:1.00]
 ; BROADWELL-NEXT:    shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1345,7 +1345,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_shld_shrd_16:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    shldw %cl, %si, %di # sched: [6:1.00]
 ; SKYLAKE-NEXT:    shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1359,7 +1359,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_shld_shrd_16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    shldw %cl, %si, %di # sched: [6:1.00]
 ; SKX-NEXT:    shrdw %cl, %si, %di # sched: [6:1.00]
@@ -1373,7 +1373,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_shld_shrd_16:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    shldw %cl, %si, %di # sched: [4:4.00]
 ; BTVER2-NEXT:    shrdw %cl, %si, %di # sched: [4:4.00]
@@ -1387,7 +1387,7 @@ define void @test_shld_shrd_16(i16 %a0,
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_shld_shrd_16:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    shldw %cl, %si, %di # sched: [100:?]
 ; ZNVER1-NEXT:    shrdw %cl, %si, %di # sched: [100:?]
@@ -1404,7 +1404,7 @@ define void @test_shld_shrd_16(i16 %a0,
 }
 define void @test_shld_shrd_32(i32 %a0, i32 %a1, i32 *%a2) optsize {
 ; GENERIC-LABEL: test_shld_shrd_32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    shldl %cl, %esi, %edi # sched: [4:1.50]
 ; GENERIC-NEXT:    shrdl %cl, %esi, %edi # sched: [4:1.50]
@@ -1418,7 +1418,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_shld_shrd_32:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    shldl %cl, %esi, %edi # sched: [2:1.00]
 ; ATOM-NEXT:    shrdl %cl, %esi, %edi # sched: [2:1.00]
@@ -1432,7 +1432,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_shld_shrd_32:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    shldl %cl, %esi, %edi # sched: [1:1.00]
 ; SLM-NEXT:    shrdl %cl, %esi, %edi # sched: [1:1.00]
@@ -1446,7 +1446,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_shld_shrd_32:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    shldl %cl, %esi, %edi # sched: [4:1.50]
 ; SANDY-NEXT:    shrdl %cl, %esi, %edi # sched: [4:1.50]
@@ -1460,7 +1460,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_shld_shrd_32:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    shldl %cl, %esi, %edi # sched: [6:1.00]
 ; HASWELL-NEXT:    shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1474,7 +1474,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_shld_shrd_32:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    shldl %cl, %esi, %edi # sched: [6:1.00]
 ; BROADWELL-NEXT:    shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1488,7 +1488,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_shld_shrd_32:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    shldl %cl, %esi, %edi # sched: [6:1.00]
 ; SKYLAKE-NEXT:    shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1502,7 +1502,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_shld_shrd_32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    shldl %cl, %esi, %edi # sched: [6:1.00]
 ; SKX-NEXT:    shrdl %cl, %esi, %edi # sched: [6:1.00]
@@ -1516,7 +1516,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_shld_shrd_32:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    shldl %cl, %esi, %edi # sched: [4:4.00]
 ; BTVER2-NEXT:    shrdl %cl, %esi, %edi # sched: [4:4.00]
@@ -1530,7 +1530,7 @@ define void @test_shld_shrd_32(i32 %a0,
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_shld_shrd_32:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    shldl %cl, %esi, %edi # sched: [100:?]
 ; ZNVER1-NEXT:    shrdl %cl, %esi, %edi # sched: [100:?]
@@ -1547,7 +1547,7 @@ define void @test_shld_shrd_32(i32 %a0,
 }
 define void @test_shld_shrd_64(i64 %a0, i64 %a1, i64 *%a2) optsize {
 ; GENERIC-LABEL: test_shld_shrd_64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    shldq %cl, %rsi, %rdi # sched: [4:1.50]
 ; GENERIC-NEXT:    shrdq %cl, %rsi, %rdi # sched: [4:1.50]
@@ -1561,7 +1561,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_shld_shrd_64:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    #APP
 ; ATOM-NEXT:    shldq %cl, %rsi, %rdi # sched: [8:4.00]
 ; ATOM-NEXT:    shrdq %cl, %rsi, %rdi # sched: [8:4.00]
@@ -1575,7 +1575,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_shld_shrd_64:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    #APP
 ; SLM-NEXT:    shldq %cl, %rsi, %rdi # sched: [1:1.00]
 ; SLM-NEXT:    shrdq %cl, %rsi, %rdi # sched: [1:1.00]
@@ -1589,7 +1589,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_shld_shrd_64:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    shldq %cl, %rsi, %rdi # sched: [4:1.50]
 ; SANDY-NEXT:    shrdq %cl, %rsi, %rdi # sched: [4:1.50]
@@ -1603,7 +1603,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_shld_shrd_64:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    shldq %cl, %rsi, %rdi # sched: [6:1.00]
 ; HASWELL-NEXT:    shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1617,7 +1617,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_shld_shrd_64:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    shldq %cl, %rsi, %rdi # sched: [6:1.00]
 ; BROADWELL-NEXT:    shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1631,7 +1631,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_shld_shrd_64:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    shldq %cl, %rsi, %rdi # sched: [6:1.00]
 ; SKYLAKE-NEXT:    shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1645,7 +1645,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_shld_shrd_64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    shldq %cl, %rsi, %rdi # sched: [6:1.00]
 ; SKX-NEXT:    shrdq %cl, %rsi, %rdi # sched: [6:1.00]
@@ -1659,7 +1659,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_shld_shrd_64:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    shldq %cl, %rsi, %rdi # sched: [4:4.00]
 ; BTVER2-NEXT:    shrdq %cl, %rsi, %rdi # sched: [4:4.00]
@@ -1673,7 +1673,7 @@ define void @test_shld_shrd_64(i64 %a0,
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_shld_shrd_64:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    shldq %cl, %rsi, %rdi # sched: [100:?]
 ; ZNVER1-NEXT:    shrdq %cl, %rsi, %rdi # sched: [100:?]

Modified: llvm/trunk/test/CodeGen/X86/select-mmx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/select-mmx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/select-mmx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/select-mmx.ll Mon Dec  4 09:18:51 2017
@@ -13,7 +13,7 @@
 define i64 @test47(i64 %arg)  {
 ;
 ; X64-LABEL: test47:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    testq %rdi, %rdi
 ; X64-NEXT:    movl $7, %ecx
@@ -24,7 +24,7 @@ define i64 @test47(i64 %arg)  {
 ; X64-NEXT:    retq
 ;
 ; I32-LABEL: test47:
-; I32:       # BB#0:
+; I32:       # %bb.0:
 ; I32-NEXT:    pushl %ebp
 ; I32-NEXT:    .cfi_def_cfa_offset 8
 ; I32-NEXT:    .cfi_offset %ebp, -8
@@ -36,7 +36,7 @@ define i64 @test47(i64 %arg)  {
 ; I32-NEXT:    orl 12(%ebp), %eax
 ; I32-NEXT:    movl $7, %eax
 ; I32-NEXT:    je .LBB0_2
-; I32-NEXT:  # BB#1:
+; I32-NEXT:  # %bb.1:
 ; I32-NEXT:    xorl %eax, %eax
 ; I32-NEXT:  .LBB0_2:
 ; I32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
@@ -67,7 +67,7 @@ define i64 @test47(i64 %arg)  {
 define i64 @test49(i64 %arg, i64 %x, i64 %y) {
 ;
 ; X64-LABEL: test49:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    testq %rdi, %rdi
 ; X64-NEXT:    cmovneq %rdx, %rsi
 ; X64-NEXT:    movd %rsi, %mm0
@@ -76,7 +76,7 @@ define i64 @test49(i64 %arg, i64 %x, i64
 ; X64-NEXT:    retq
 ;
 ; I32-LABEL: test49:
-; I32:       # BB#0:
+; I32:       # %bb.0:
 ; I32-NEXT:    pushl %ebp
 ; I32-NEXT:    .cfi_def_cfa_offset 8
 ; I32-NEXT:    .cfi_offset %ebp, -8
@@ -87,7 +87,7 @@ define i64 @test49(i64 %arg, i64 %x, i64
 ; I32-NEXT:    movl 8(%ebp), %eax
 ; I32-NEXT:    orl 12(%ebp), %eax
 ; I32-NEXT:    je .LBB1_1
-; I32-NEXT:  # BB#2:
+; I32-NEXT:  # %bb.2:
 ; I32-NEXT:    leal 24(%ebp), %eax
 ; I32-NEXT:    jmp .LBB1_3
 ; I32-NEXT:  .LBB1_1:

Modified: llvm/trunk/test/CodeGen/X86/select-with-and-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/select-with-and-or.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/select-with-and-or.ll (original)
+++ llvm/trunk/test/CodeGen/X86/select-with-and-or.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x i32> @test1(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -14,7 +14,7 @@ define <4 x i32> @test1(<4 x float> %a,
 
 define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vorps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -25,7 +25,7 @@ define <4 x i32> @test2(<4 x float> %a,
 
 define <4 x i32> @test3(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -36,7 +36,7 @@ define <4 x i32> @test3(<4 x float> %a,
 
 define <4 x i32> @test4(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vorps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -47,7 +47,7 @@ define <4 x i32> @test4(<4 x float> %a,
 
 define <4 x i32> @test5(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %f = fcmp ult <4 x float> %a, %b
@@ -57,7 +57,7 @@ define <4 x i32> @test5(<4 x float> %a,
 
 define <4 x i32> @test6(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %not.f = fcmp oge <4 x float> %a, %b
@@ -67,7 +67,7 @@ define <4 x i32> @test6(<4 x float> %a,
 
 define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnleps %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vandps (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -81,7 +81,7 @@ define <4 x i32> @test7(<4 x float> %a,
 
 define <2 x double> @test1f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test1f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vandpd %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -92,7 +92,7 @@ define <2 x double> @test1f(<2 x double>
 
 define <2 x double> @test2f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test2f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmplepd %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    vorpd %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -103,7 +103,7 @@ define <2 x double> @test2f(<2 x double>
 
 define <2 x double> @test3f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test3f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnltpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vandpd %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -114,7 +114,7 @@ define <2 x double> @test3f(<2 x double>
 
 define <2 x double> @test4f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test4f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnlepd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vorpd %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -125,7 +125,7 @@ define <2 x double> @test4f(<2 x double>
 
 define <2 x double> @test5f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test5f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpnlepd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %f = fcmp ugt <2 x double> %a, %b
@@ -135,7 +135,7 @@ define <2 x double> @test5f(<2 x double>
 
 define <2 x double> @test6f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test6f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %f = fcmp ule <2 x double> %a, %b
@@ -145,7 +145,7 @@ define <2 x double> @test6f(<2 x double>
 
 define <2 x double> @test7f(<2 x double> %a, <2 x double> %b, <2 x double>* %p) {
 ; CHECK-LABEL: test7f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vandpd (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/select.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/select.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    addq $8, %rdi
 ; CHECK-NEXT:    addq $8, %rsi
 ; CHECK-NEXT:    testb $1, %dl
@@ -17,10 +17,10 @@ define i32 @test1(%0* %p, %0* %q, i1 %r)
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test1:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    testb $1, %cl
 ; MCU-NEXT:    jne .LBB0_1
-; MCU-NEXT:  # BB#2:
+; MCU-NEXT:  # %bb.2:
 ; MCU-NEXT:    addl $8, %edx
 ; MCU-NEXT:    movl %edx, %eax
 ; MCU-NEXT:    movl (%eax), %eax
@@ -39,7 +39,7 @@ define i32 @test1(%0* %p, %0* %q, i1 %r)
 ; PR2139
 define i32 @test2() nounwind {
 ; GENERIC-LABEL: test2:
-; GENERIC:       ## BB#0: ## %entry
+; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    pushq %rax
 ; GENERIC-NEXT:    callq _return_false
 ; GENERIC-NEXT:    xorl %ecx, %ecx
@@ -49,14 +49,14 @@ define i32 @test2() nounwind {
 ; GENERIC-NEXT:    shll $3, %eax
 ; GENERIC-NEXT:    cmpl $32768, %eax ## imm = 0x8000
 ; GENERIC-NEXT:    jge LBB1_1
-; GENERIC-NEXT:  ## BB#2: ## %bb91
+; GENERIC-NEXT:  ## %bb.2: ## %bb91
 ; GENERIC-NEXT:    xorl %eax, %eax
 ; GENERIC-NEXT:    popq %rcx
 ; GENERIC-NEXT:    retq
 ; GENERIC-NEXT:  LBB1_1: ## %bb90
 ;
 ; ATOM-LABEL: test2:
-; ATOM:       ## BB#0: ## %entry
+; ATOM:       ## %bb.0: ## %entry
 ; ATOM-NEXT:    pushq %rax
 ; ATOM-NEXT:    callq _return_false
 ; ATOM-NEXT:    xorl %ecx, %ecx
@@ -66,25 +66,25 @@ define i32 @test2() nounwind {
 ; ATOM-NEXT:    shll $3, %edx
 ; ATOM-NEXT:    cmpl $32768, %edx ## imm = 0x8000
 ; ATOM-NEXT:    jge LBB1_1
-; ATOM-NEXT:  ## BB#2: ## %bb91
+; ATOM-NEXT:  ## %bb.2: ## %bb91
 ; ATOM-NEXT:    xorl %eax, %eax
 ; ATOM-NEXT:    popq %rcx
 ; ATOM-NEXT:    retq
 ; ATOM-NEXT:  LBB1_1: ## %bb90
 ;
 ; MCU-LABEL: test2:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    calll return_false
 ; MCU-NEXT:    xorl %ecx, %ecx
 ; MCU-NEXT:    testb $1, %al
 ; MCU-NEXT:    jne .LBB1_2
-; MCU-NEXT:  # BB#1: # %entry
+; MCU-NEXT:  # %bb.1: # %entry
 ; MCU-NEXT:    movl $-480, %ecx # imm = 0xFE20
 ; MCU-NEXT:  .LBB1_2: # %entry
 ; MCU-NEXT:    shll $3, %ecx
 ; MCU-NEXT:    cmpl $32768, %ecx # imm = 0x8000
 ; MCU-NEXT:    jge .LBB1_3
-; MCU-NEXT:  # BB#4: # %bb91
+; MCU-NEXT:  # %bb.4: # %bb91
 ; MCU-NEXT:    xorl %eax, %eax
 ; MCU-NEXT:    retl
 ; MCU-NEXT:  .LBB1_3: # %bb90
@@ -106,7 +106,7 @@ declare i1 @return_false()
 ;; Select between two floating point constants.
 define float @test3(i32 %x) nounwind readnone {
 ; CHECK-LABEL: test3:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    sete %al
@@ -115,7 +115,7 @@ define float @test3(i32 %x) nounwind rea
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test3:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    xorl %ecx, %ecx
 ; MCU-NEXT:    testl %eax, %eax
 ; MCU-NEXT:    sete %cl
@@ -129,7 +129,7 @@ entry:
 
 define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
 ; CHECK-LABEL: test4:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    ucomisd %xmm0, %xmm1
@@ -138,7 +138,7 @@ define signext i8 @test4(i8* nocapture %
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test4:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    movl %eax, %ecx
 ; MCU-NEXT:    fldl {{[0-9]+}}(%esp)
 ; MCU-NEXT:    flds {{\.LCPI.*}}
@@ -160,10 +160,10 @@ entry:
 
 define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
 ; CHECK-LABEL: test5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    jne LBB4_2
-; CHECK-NEXT:  ## BB#1:
+; CHECK-NEXT:  ## %bb.1:
 ; CHECK-NEXT:    movdqa %xmm1, %xmm0
 ; CHECK-NEXT:  LBB4_2:
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -172,12 +172,12 @@ define void @test5(i1 %c, <2 x i16> %a,
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test5:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    pushl %esi
 ; MCU-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; MCU-NEXT:    testb $1, %al
 ; MCU-NEXT:    jne .LBB4_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; MCU-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
 ; MCU-NEXT:  .LBB4_2:
@@ -193,10 +193,10 @@ define void @test5(i1 %c, <2 x i16> %a,
 ; Verify that the fmul gets sunk into the one part of the diamond where it is needed.
 define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
 ; CHECK-LABEL: test6:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je LBB5_1
-; CHECK-NEXT:  ## BB#2:
+; CHECK-NEXT:  ## %bb.2:
 ; CHECK-NEXT:    movaps (%rsi), %xmm0
 ; CHECK-NEXT:    movaps %xmm0, (%rsi)
 ; CHECK-NEXT:    retq
@@ -207,7 +207,7 @@ define void @test6(i32 %C, <4 x float>*
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test6:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    pushl %eax
 ; MCU-NEXT:    flds 12(%edx)
 ; MCU-NEXT:    fstps (%esp) # 4-byte Folded Spill
@@ -227,7 +227,7 @@ define void @test6(i32 %C, <4 x float>*
 ; MCU-NEXT:    testl %eax, %eax
 ; MCU-NEXT:    flds (%edx)
 ; MCU-NEXT:    je .LBB5_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    fstp %st(1)
 ; MCU-NEXT:    fstp %st(3)
 ; MCU-NEXT:    fstp %st(1)
@@ -268,7 +268,7 @@ define void @test6(i32 %C, <4 x float>*
 ; Select with fp80's
 define x86_fp80 @test7(i32 %tmp8) nounwind {
 ; CHECK-LABEL: test7:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    setns %al
@@ -278,7 +278,7 @@ define x86_fp80 @test7(i32 %tmp8) nounwi
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test7:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    xorl %ecx, %ecx
 ; MCU-NEXT:    testl %eax, %eax
 ; MCU-NEXT:    setns %cl
@@ -293,10 +293,10 @@ define x86_fp80 @test7(i32 %tmp8) nounwi
 ; widening select v6i32 and then a sub
 define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) nounwind {
 ; GENERIC-LABEL: test8:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    testb $1, %dil
 ; GENERIC-NEXT:    jne LBB7_1
-; GENERIC-NEXT:  ## BB#2:
+; GENERIC-NEXT:  ## %bb.2:
 ; GENERIC-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; GENERIC-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; GENERIC-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -327,10 +327,10 @@ define void @test8(i1 %c, <6 x i32>* %ds
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test8:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    testb $1, %dil
 ; ATOM-NEXT:    jne LBB7_1
-; ATOM-NEXT:  ## BB#2:
+; ATOM-NEXT:  ## %bb.2:
 ; ATOM-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ATOM-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; ATOM-NEXT:    movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -359,14 +359,14 @@ define void @test8(i1 %c, <6 x i32>* %ds
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test8:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    pushl %ebp
 ; MCU-NEXT:    pushl %ebx
 ; MCU-NEXT:    pushl %edi
 ; MCU-NEXT:    pushl %esi
 ; MCU-NEXT:    testb $1, %al
 ; MCU-NEXT:    jne .LBB7_1
-; MCU-NEXT:  # BB#2:
+; MCU-NEXT:  # %bb.2:
 ; MCU-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; MCU-NEXT:    movl (%eax), %eax
 ; MCU-NEXT:    je .LBB7_5
@@ -441,14 +441,14 @@ define void @test8(i1 %c, <6 x i32>* %ds
 
 define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
 ; GENERIC-LABEL: test9:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpq $1, %rdi
 ; GENERIC-NEXT:    sbbq %rax, %rax
 ; GENERIC-NEXT:    orq %rsi, %rax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test9:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpq $1, %rdi
 ; ATOM-NEXT:    sbbq %rax, %rax
 ; ATOM-NEXT:    orq %rsi, %rax
@@ -457,10 +457,10 @@ define i64 @test9(i64 %x, i64 %y) nounwi
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test9:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    orl %edx, %eax
 ; MCU-NEXT:    jne .LBB8_1
-; MCU-NEXT:  # BB#2:
+; MCU-NEXT:  # %bb.2:
 ; MCU-NEXT:    movl $-1, %eax
 ; MCU-NEXT:    movl $-1, %edx
 ; MCU-NEXT:    retl
@@ -476,14 +476,14 @@ define i64 @test9(i64 %x, i64 %y) nounwi
 ;; Same as test9
 define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
 ; GENERIC-LABEL: test9a:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpq $1, %rdi
 ; GENERIC-NEXT:    sbbq %rax, %rax
 ; GENERIC-NEXT:    orq %rsi, %rax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test9a:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpq $1, %rdi
 ; ATOM-NEXT:    sbbq %rax, %rax
 ; ATOM-NEXT:    orq %rsi, %rax
@@ -492,12 +492,12 @@ define i64 @test9a(i64 %x, i64 %y) nounw
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test9a:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    orl %edx, %eax
 ; MCU-NEXT:    movl $-1, %eax
 ; MCU-NEXT:    movl $-1, %edx
 ; MCU-NEXT:    je .LBB9_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MCU-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; MCU-NEXT:  .LBB9_2:
@@ -509,14 +509,14 @@ define i64 @test9a(i64 %x, i64 %y) nounw
 
 define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
 ; GENERIC-LABEL: test9b:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpq $1, %rdi
 ; GENERIC-NEXT:    sbbq %rax, %rax
 ; GENERIC-NEXT:    orq %rsi, %rax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test9b:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpq $1, %rdi
 ; ATOM-NEXT:    sbbq %rax, %rax
 ; ATOM-NEXT:    orq %rsi, %rax
@@ -525,7 +525,7 @@ define i64 @test9b(i64 %x, i64 %y) nounw
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test9b:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    movl %edx, %ecx
 ; MCU-NEXT:    xorl %edx, %edx
 ; MCU-NEXT:    orl %ecx, %eax
@@ -544,7 +544,7 @@ define i64 @test9b(i64 %x, i64 %y) nounw
 ;; Select between -1 and 1.
 define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
 ; CHECK-LABEL: test10:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testq %rdi, %rdi
 ; CHECK-NEXT:    setne %al
@@ -552,12 +552,12 @@ define i64 @test10(i64 %x, i64 %y) nounw
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test10:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    orl %edx, %eax
 ; MCU-NEXT:    movl $-1, %eax
 ; MCU-NEXT:    movl $-1, %edx
 ; MCU-NEXT:    je .LBB11_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    xorl %edx, %edx
 ; MCU-NEXT:    movl $1, %eax
 ; MCU-NEXT:  .LBB11_2:
@@ -569,7 +569,7 @@ define i64 @test10(i64 %x, i64 %y) nounw
 
 define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
 ; CHECK-LABEL: test11:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq $1, %rdi
 ; CHECK-NEXT:    sbbq %rax, %rax
 ; CHECK-NEXT:    notq %rax
@@ -577,10 +577,10 @@ define i64 @test11(i64 %x, i64 %y) nounw
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test11:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    orl %edx, %eax
 ; MCU-NEXT:    je .LBB12_1
-; MCU-NEXT:  # BB#2:
+; MCU-NEXT:  # %bb.2:
 ; MCU-NEXT:    movl $-1, %eax
 ; MCU-NEXT:    movl $-1, %edx
 ; MCU-NEXT:    retl
@@ -595,7 +595,7 @@ define i64 @test11(i64 %x, i64 %y) nounw
 
 define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
 ; CHECK-LABEL: test11a:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cmpq $1, %rdi
 ; CHECK-NEXT:    sbbq %rax, %rax
 ; CHECK-NEXT:    notq %rax
@@ -603,12 +603,12 @@ define i64 @test11a(i64 %x, i64 %y) noun
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test11a:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    orl %edx, %eax
 ; MCU-NEXT:    movl $-1, %eax
 ; MCU-NEXT:    movl $-1, %edx
 ; MCU-NEXT:    jne .LBB13_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MCU-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; MCU-NEXT:  .LBB13_2:
@@ -623,7 +623,7 @@ declare noalias i8* @_Znam(i64) noredzon
 
 define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
 ; GENERIC-LABEL: test12:
-; GENERIC:       ## BB#0: ## %entry
+; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    movl $4, %ecx
 ; GENERIC-NEXT:    movq %rdi, %rax
 ; GENERIC-NEXT:    mulq %rcx
@@ -632,7 +632,7 @@ define noalias i8* @test12(i64 %count) n
 ; GENERIC-NEXT:    jmp __Znam ## TAILCALL
 ;
 ; ATOM-LABEL: test12:
-; ATOM:       ## BB#0: ## %entry
+; ATOM:       ## %bb.0: ## %entry
 ; ATOM-NEXT:    movq %rdi, %rax
 ; ATOM-NEXT:    movl $4, %ecx
 ; ATOM-NEXT:    mulq %rcx
@@ -641,7 +641,7 @@ define noalias i8* @test12(i64 %count) n
 ; ATOM-NEXT:    jmp __Znam ## TAILCALL
 ;
 ; MCU-LABEL: test12:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    pushl %ebp
 ; MCU-NEXT:    pushl %ebx
 ; MCU-NEXT:    pushl %edi
@@ -663,7 +663,7 @@ define noalias i8* @test12(i64 %count) n
 ; MCU-NEXT:    movl $-1, %eax
 ; MCU-NEXT:    movl $-1, %edx
 ; MCU-NEXT:    jne .LBB14_2
-; MCU-NEXT:  # BB#1: # %entry
+; MCU-NEXT:  # %bb.1: # %entry
 ; MCU-NEXT:    movl %esi, %eax
 ; MCU-NEXT:    movl %edi, %edx
 ; MCU-NEXT:  .LBB14_2: # %entry
@@ -685,13 +685,13 @@ declare { i64, i1 } @llvm.umul.with.over
 
 define i32 @test13(i32 %a, i32 %b) nounwind {
 ; GENERIC-LABEL: test13:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpl %esi, %edi
 ; GENERIC-NEXT:    sbbl %eax, %eax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test13:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpl %esi, %edi
 ; ATOM-NEXT:    sbbl %eax, %eax
 ; ATOM-NEXT:    nop
@@ -701,7 +701,7 @@ define i32 @test13(i32 %a, i32 %b) nounw
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test13:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    cmpl %edx, %eax
 ; MCU-NEXT:    sbbl %eax, %eax
 ; MCU-NEXT:    retl
@@ -712,7 +712,7 @@ define i32 @test13(i32 %a, i32 %b) nounw
 
 define i32 @test14(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: test14:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    setae %al
@@ -720,7 +720,7 @@ define i32 @test14(i32 %a, i32 %b) nounw
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test14:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    xorl %ecx, %ecx
 ; MCU-NEXT:    cmpl %edx, %eax
 ; MCU-NEXT:    setae %cl
@@ -735,13 +735,13 @@ define i32 @test14(i32 %a, i32 %b) nounw
 ; rdar://10961709
 define i32 @test15(i32 %x) nounwind {
 ; GENERIC-LABEL: test15:
-; GENERIC:       ## BB#0: ## %entry
+; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    negl %edi
 ; GENERIC-NEXT:    sbbl %eax, %eax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test15:
-; ATOM:       ## BB#0: ## %entry
+; ATOM:       ## %bb.0: ## %entry
 ; ATOM-NEXT:    negl %edi
 ; ATOM-NEXT:    sbbl %eax, %eax
 ; ATOM-NEXT:    nop
@@ -751,7 +751,7 @@ define i32 @test15(i32 %x) nounwind {
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test15:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    negl %eax
 ; MCU-NEXT:    sbbl %eax, %eax
 ; MCU-NEXT:    retl
@@ -763,13 +763,13 @@ entry:
 
 define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: test16:
-; GENERIC:       ## BB#0: ## %entry
+; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    negq %rdi
 ; GENERIC-NEXT:    sbbq %rax, %rax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test16:
-; ATOM:       ## BB#0: ## %entry
+; ATOM:       ## %bb.0: ## %entry
 ; ATOM-NEXT:    negq %rdi
 ; ATOM-NEXT:    sbbq %rax, %rax
 ; ATOM-NEXT:    nop
@@ -779,7 +779,7 @@ define i64 @test16(i64 %x) nounwind uwta
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test16:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    movl %eax, %ecx
 ; MCU-NEXT:    xorl %eax, %eax
 ; MCU-NEXT:    orl %edx, %ecx
@@ -795,14 +795,14 @@ entry:
 
 define i16 @test17(i16 %x) nounwind {
 ; GENERIC-LABEL: test17:
-; GENERIC:       ## BB#0: ## %entry
+; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    negw %di
 ; GENERIC-NEXT:    sbbl %eax, %eax
 ; GENERIC-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test17:
-; ATOM:       ## BB#0: ## %entry
+; ATOM:       ## %bb.0: ## %entry
 ; ATOM-NEXT:    negw %di
 ; ATOM-NEXT:    sbbl %eax, %eax
 ; ATOM-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -813,7 +813,7 @@ define i16 @test17(i16 %x) nounwind {
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test17:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    negw %ax
 ; MCU-NEXT:    sbbl %eax, %eax
 ; MCU-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -826,14 +826,14 @@ entry:
 
 define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
 ; GENERIC-LABEL: test18:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpl $15, %edi
 ; GENERIC-NEXT:    cmovgel %edx, %esi
 ; GENERIC-NEXT:    movl %esi, %eax
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: test18:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpl $15, %edi
 ; ATOM-NEXT:    cmovgel %edx, %esi
 ; ATOM-NEXT:    movl %esi, %eax
@@ -842,10 +842,10 @@ define i8 @test18(i32 %x, i8 zeroext %a,
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: test18:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    cmpl $15, %eax
 ; MCU-NEXT:    jl .LBB20_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    movl %ecx, %edx
 ; MCU-NEXT:  .LBB20_2:
 ; MCU-NEXT:    movl %edx, %eax
@@ -857,7 +857,7 @@ define i8 @test18(i32 %x, i8 zeroext %a,
 
 define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
 ; CHECK-LABEL: trunc_select_miscompile:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    orb $2, %sil
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    shll %cl, %edi
@@ -865,7 +865,7 @@ define i32 @trunc_select_miscompile(i32
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: trunc_select_miscompile:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    orb $2, %dl
 ; MCU-NEXT:    movl %edx, %ecx
 ; MCU-NEXT:    shll %cl, %eax
@@ -878,45 +878,45 @@ define i32 @trunc_select_miscompile(i32
 ; reproducer for pr29002
 define void @clamp_i8(i32 %src, i8* %dst) {
 ; GENERIC-LABEL: clamp_i8:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpl $127, %edi
 ; GENERIC-NEXT:    movl $127, %eax
 ; GENERIC-NEXT:    cmovlel %edi, %eax
 ; GENERIC-NEXT:    cmpl $-128, %eax
 ; GENERIC-NEXT:    movb $-128, %cl
 ; GENERIC-NEXT:    jl LBB22_2
-; GENERIC-NEXT:  ## BB#1:
+; GENERIC-NEXT:  ## %bb.1:
 ; GENERIC-NEXT:    movl %eax, %ecx
 ; GENERIC-NEXT:  LBB22_2:
 ; GENERIC-NEXT:    movb %cl, (%rsi)
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: clamp_i8:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpl $127, %edi
 ; ATOM-NEXT:    movl $127, %eax
 ; ATOM-NEXT:    cmovlel %edi, %eax
 ; ATOM-NEXT:    movb $-128, %cl
 ; ATOM-NEXT:    cmpl $-128, %eax
 ; ATOM-NEXT:    jl LBB22_2
-; ATOM-NEXT:  ## BB#1:
+; ATOM-NEXT:  ## %bb.1:
 ; ATOM-NEXT:    movl %eax, %ecx
 ; ATOM-NEXT:  LBB22_2:
 ; ATOM-NEXT:    movb %cl, (%rsi)
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: clamp_i8:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    cmpl $127, %eax
 ; MCU-NEXT:    movl $127, %ecx
 ; MCU-NEXT:    jg .LBB22_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    movl %eax, %ecx
 ; MCU-NEXT:  .LBB22_2:
 ; MCU-NEXT:    cmpl $-128, %ecx
 ; MCU-NEXT:    movb $-128, %al
 ; MCU-NEXT:    jl .LBB22_4
-; MCU-NEXT:  # BB#3:
+; MCU-NEXT:  # %bb.3:
 ; MCU-NEXT:    movl %ecx, %eax
 ; MCU-NEXT:  .LBB22_4:
 ; MCU-NEXT:    movb %al, (%edx)
@@ -933,7 +933,7 @@ define void @clamp_i8(i32 %src, i8* %dst
 ; reproducer for pr29002
 define void @clamp(i32 %src, i16* %dst) {
 ; GENERIC-LABEL: clamp:
-; GENERIC:       ## BB#0:
+; GENERIC:       ## %bb.0:
 ; GENERIC-NEXT:    cmpl $32767, %edi ## imm = 0x7FFF
 ; GENERIC-NEXT:    movl $32767, %eax ## imm = 0x7FFF
 ; GENERIC-NEXT:    cmovlel %edi, %eax
@@ -944,7 +944,7 @@ define void @clamp(i32 %src, i16* %dst)
 ; GENERIC-NEXT:    retq
 ;
 ; ATOM-LABEL: clamp:
-; ATOM:       ## BB#0:
+; ATOM:       ## %bb.0:
 ; ATOM-NEXT:    cmpl $32767, %edi ## imm = 0x7FFF
 ; ATOM-NEXT:    movl $32767, %eax ## imm = 0x7FFF
 ; ATOM-NEXT:    cmovlel %edi, %eax
@@ -955,17 +955,17 @@ define void @clamp(i32 %src, i16* %dst)
 ; ATOM-NEXT:    retq
 ;
 ; MCU-LABEL: clamp:
-; MCU:       # BB#0:
+; MCU:       # %bb.0:
 ; MCU-NEXT:    cmpl $32767, %eax # imm = 0x7FFF
 ; MCU-NEXT:    movl $32767, %ecx # imm = 0x7FFF
 ; MCU-NEXT:    jg .LBB23_2
-; MCU-NEXT:  # BB#1:
+; MCU-NEXT:  # %bb.1:
 ; MCU-NEXT:    movl %eax, %ecx
 ; MCU-NEXT:  .LBB23_2:
 ; MCU-NEXT:    cmpl $-32768, %ecx # imm = 0x8000
 ; MCU-NEXT:    movw $-32768, %ax # imm = 0x8000
 ; MCU-NEXT:    jl .LBB23_4
-; MCU-NEXT:  # BB#3:
+; MCU-NEXT:  # %bb.3:
 ; MCU-NEXT:    movl %ecx, %eax
 ; MCU-NEXT:  .LBB23_4:
 ; MCU-NEXT:    movw %ax, (%edx)
@@ -987,7 +987,7 @@ define void @test19() {
 ; that code path, it can be deleted.
 ;
 ; CHECK-LABEL: test19:
-; CHECK:       ## BB#0: ## %BB
+; CHECK:       ## %bb.0: ## %BB
 ; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    movb $1, %cl
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -995,7 +995,7 @@ define void @test19() {
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %cl, %cl
 ; CHECK-NEXT:    jne LBB24_1
-; CHECK-NEXT:  ## BB#2: ## %CF250
+; CHECK-NEXT:  ## %bb.2: ## %CF250
 ; CHECK-NEXT:    ## in Loop: Header=BB24_1 Depth=1
 ; CHECK-NEXT:    jne LBB24_1
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -1004,11 +1004,11 @@ define void @test19() {
 ; CHECK-NEXT:    cmpl %eax, %eax
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    jp LBB24_3
-; CHECK-NEXT:  ## BB#4: ## %CF244
+; CHECK-NEXT:  ## %bb.4: ## %CF244
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: test19:
-; MCU:       # BB#0: # %BB
+; MCU:       # %bb.0: # %BB
 ; MCU-NEXT:    movl $-1, %ecx
 ; MCU-NEXT:    movb $1, %al
 ; MCU-NEXT:    .p2align 4, 0x90
@@ -1016,10 +1016,10 @@ define void @test19() {
 ; MCU-NEXT:    # =>This Inner Loop Header: Depth=1
 ; MCU-NEXT:    testb %al, %al
 ; MCU-NEXT:    jne .LBB24_1
-; MCU-NEXT:  # BB#2: # %CF250
+; MCU-NEXT:  # %bb.2: # %CF250
 ; MCU-NEXT:    # in Loop: Header=BB24_1 Depth=1
 ; MCU-NEXT:    jne .LBB24_1
-; MCU-NEXT:  # BB#3: # %CF242.preheader
+; MCU-NEXT:  # %bb.3: # %CF242.preheader
 ; MCU-NEXT:    fldz
 ; MCU-NEXT:    .p2align 4, 0x90
 ; MCU-NEXT:  .LBB24_4: # %CF242
@@ -1030,7 +1030,7 @@ define void @test19() {
 ; MCU-NEXT:    # kill: %ah<def> %ah<kill> %ax<kill>
 ; MCU-NEXT:    sahf
 ; MCU-NEXT:    jp .LBB24_4
-; MCU-NEXT:  # BB#5: # %CF244
+; MCU-NEXT:  # %bb.5: # %CF244
 ; MCU-NEXT:    fstp %st(0)
 ; MCU-NEXT:    retl
 BB:
@@ -1059,7 +1059,7 @@ CF244:
 
 define i16 @select_xor_1(i16 %A, i8 %cond) {
 ; CHECK-LABEL: select_xor_1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    xorl $43, %eax
 ; CHECK-NEXT:    testb $1, %sil
@@ -1068,7 +1068,7 @@ define i16 @select_xor_1(i16 %A, i8 %con
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: select_xor_1:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    andl $1, %edx
 ; MCU-NEXT:    negl %edx
 ; MCU-NEXT:    andl $43, %edx
@@ -1085,7 +1085,7 @@ entry:
 
 define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
 ; CHECK-LABEL: select_xor_2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %edi, %esi
 ; CHECK-NEXT:    testb $1, %dl
 ; CHECK-NEXT:    cmovel %edi, %esi
@@ -1093,7 +1093,7 @@ define i32 @select_xor_2(i32 %A, i32 %B,
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: select_xor_2:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    andl $1, %ecx
 ; MCU-NEXT:    negl %ecx
 ; MCU-NEXT:    andl %edx, %ecx
@@ -1109,7 +1109,7 @@ entry:
 
 define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
 ; CHECK-LABEL: select_or:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    orl %edi, %esi
 ; CHECK-NEXT:    testb $1, %dl
 ; CHECK-NEXT:    cmovel %edi, %esi
@@ -1117,7 +1117,7 @@ define i32 @select_or(i32 %A, i32 %B, i8
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: select_or:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    andl $1, %ecx
 ; MCU-NEXT:    negl %ecx
 ; MCU-NEXT:    andl %edx, %ecx
@@ -1133,7 +1133,7 @@ entry:
 
 define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
 ; CHECK-LABEL: select_or_1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    orl %edi, %esi
 ; CHECK-NEXT:    testb $1, %dl
 ; CHECK-NEXT:    cmovel %edi, %esi
@@ -1141,7 +1141,7 @@ define i32 @select_or_1(i32 %A, i32 %B,
 ; CHECK-NEXT:    retq
 ;
 ; MCU-LABEL: select_or_1:
-; MCU:       # BB#0: # %entry
+; MCU:       # %bb.0: # %entry
 ; MCU-NEXT:    andl $1, %ecx
 ; MCU-NEXT:    negl %ecx
 ; MCU-NEXT:    andl %edx, %ecx

Modified: llvm/trunk/test/CodeGen/X86/select_const.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/select_const.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/select_const.ll (original)
+++ llvm/trunk/test/CodeGen/X86/select_const.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define i32 @select_0_or_1(i1 %cond) {
 ; CHECK-LABEL: select_0_or_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    notb %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    andl $1, %eax
@@ -19,7 +19,7 @@ define i32 @select_0_or_1(i1 %cond) {
 
 define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_0_or_1_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    retq
@@ -29,7 +29,7 @@ define i32 @select_0_or_1_zeroext(i1 zer
 
 define i32 @select_0_or_1_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_0_or_1_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    notb %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    andl $1, %eax
@@ -42,7 +42,7 @@ define i32 @select_0_or_1_signext(i1 sig
 
 define i32 @select_1_or_0(i1 %cond) {
 ; CHECK-LABEL: select_1_or_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -52,7 +52,7 @@ define i32 @select_1_or_0(i1 %cond) {
 
 define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_1_or_0_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 1, i32 0
@@ -61,7 +61,7 @@ define i32 @select_1_or_0_zeroext(i1 zer
 
 define i32 @select_1_or_0_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_1_or_0_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -73,7 +73,7 @@ define i32 @select_1_or_0_signext(i1 sig
 
 define i32 @select_0_or_neg1(i1 %cond) {
 ; CHECK-LABEL: select_0_or_neg1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    leal -1(%rdi), %eax
@@ -84,7 +84,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
 
 define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_0_or_neg1_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    leal -1(%rdi), %eax
 ; CHECK-NEXT:    retq
@@ -94,7 +94,7 @@ define i32 @select_0_or_neg1_zeroext(i1
 
 define i32 @select_0_or_neg1_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_0_or_neg1_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    notl %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -106,7 +106,7 @@ define i32 @select_0_or_neg1_signext(i1
 
 define i32 @select_neg1_or_0(i1 %cond) {
 ; CHECK-LABEL: select_neg1_or_0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    negl %edi
 ; CHECK-NEXT:    movl %edi, %eax
@@ -117,7 +117,7 @@ define i32 @select_neg1_or_0(i1 %cond) {
 
 define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_neg1_or_0_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    negl %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -127,7 +127,7 @@ define i32 @select_neg1_or_0_zeroext(i1
 
 define i32 @select_neg1_or_0_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_neg1_or_0_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 -1, i32 0
@@ -138,7 +138,7 @@ define i32 @select_neg1_or_0_signext(i1
 
 define i32 @select_Cplus1_C(i1 %cond) {
 ; CHECK-LABEL: select_Cplus1_C:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    leal 41(%rdi), %eax
@@ -149,7 +149,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
 
 define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_Cplus1_C_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    leal 41(%rdi), %eax
 ; CHECK-NEXT:    retq
@@ -159,7 +159,7 @@ define i32 @select_Cplus1_C_zeroext(i1 z
 
 define i32 @select_Cplus1_C_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_Cplus1_C_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $41, %eax
 ; CHECK-NEXT:    subl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -171,7 +171,7 @@ define i32 @select_Cplus1_C_signext(i1 s
 
 define i32 @select_C_Cplus1(i1 %cond) {
 ; CHECK-LABEL: select_C_Cplus1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movl $42, %eax
 ; CHECK-NEXT:    subl %edi, %eax
@@ -182,7 +182,7 @@ define i32 @select_C_Cplus1(i1 %cond) {
 
 define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_C_Cplus1_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $42, %eax
 ; CHECK-NEXT:    subl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -192,7 +192,7 @@ define i32 @select_C_Cplus1_zeroext(i1 z
 
 define i32 @select_C_Cplus1_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_C_Cplus1_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movl $42, %eax
 ; CHECK-NEXT:    subl %edi, %eax
@@ -206,7 +206,7 @@ define i32 @select_C_Cplus1_signext(i1 s
 
 define i32 @select_lea_2(i1 zeroext %cond) {
 ; CHECK-LABEL: select_lea_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    leal -1(%rax,%rax), %eax
@@ -217,7 +217,7 @@ define i32 @select_lea_2(i1 zeroext %con
 
 define i64 @select_lea_3(i1 zeroext %cond) {
 ; CHECK-LABEL: select_lea_3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    leaq -2(%rax,%rax,2), %rax
@@ -228,7 +228,7 @@ define i64 @select_lea_3(i1 zeroext %con
 
 define i32 @select_lea_5(i1 zeroext %cond) {
 ; CHECK-LABEL: select_lea_5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    leal -2(%rax,%rax,4), %eax
@@ -239,7 +239,7 @@ define i32 @select_lea_5(i1 zeroext %con
 
 define i64 @select_lea_9(i1 zeroext %cond) {
 ; CHECK-LABEL: select_lea_9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    leaq -7(%rax,%rax,8), %rax
@@ -252,7 +252,7 @@ define i64 @select_lea_9(i1 zeroext %con
 
 define i64 @sel_1_2(i64 %x, i64 %y) {
 ; CHECK-LABEL: sel_1_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpq $42, %rdi
 ; CHECK-NEXT:    sbbq $0, %rsi
 ; CHECK-NEXT:    leaq 2(%rsi), %rax
@@ -267,7 +267,7 @@ define i64 @sel_1_2(i64 %x, i64 %y) {
 
 define i8 @sel_1_neg1(i32 %x) {
 ; CHECK-LABEL: sel_1_neg1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $42, %edi
 ; CHECK-NEXT:    setg %al
 ; CHECK-NEXT:    shlb $2, %al
@@ -282,7 +282,7 @@ define i8 @sel_1_neg1(i32 %x) {
 
 define i16 @sel_neg1_1(i32 %x) {
 ; CHECK-LABEL: sel_neg1_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $43, %edi
 ; CHECK-NEXT:    setl %al
@@ -298,7 +298,7 @@ define i16 @sel_neg1_1(i32 %x) {
 
 define i32 @sel_1_neg1_32(i32 %x) {
 ; CHECK-LABEL: sel_1_neg1_32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $42, %edi
 ; CHECK-NEXT:    setg %al
@@ -311,7 +311,7 @@ define i32 @sel_1_neg1_32(i32 %x) {
 
 define i32 @sel_neg1_1_32(i32 %x) {
 ; CHECK-LABEL: sel_neg1_1_32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $43, %edi
 ; CHECK-NEXT:    setl %al
@@ -328,7 +328,7 @@ define i32 @sel_neg1_1_32(i32 %x) {
 
 define i8 @select_pow2_diff(i1 zeroext %cond) {
 ; CHECK-LABEL: select_pow2_diff:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shlb $4, %dil
 ; CHECK-NEXT:    orb $3, %dil
 ; CHECK-NEXT:    movl %edi, %eax
@@ -339,7 +339,7 @@ define i8 @select_pow2_diff(i1 zeroext %
 
 define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
 ; CHECK-LABEL: select_pow2_diff_invert:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    shll $6, %eax
@@ -352,7 +352,7 @@ define i16 @select_pow2_diff_invert(i1 z
 
 define i32 @select_pow2_diff_neg(i1 zeroext %cond) {
 ; CHECK-LABEL: select_pow2_diff_neg:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shlb $4, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    orl $-25, %eax
@@ -363,7 +363,7 @@ define i32 @select_pow2_diff_neg(i1 zero
 
 define i64 @select_pow2_diff_neg_invert(i1 zeroext %cond) {
 ; CHECK-LABEL: select_pow2_diff_neg_invert:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb $1, %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    shlq $7, %rax
@@ -377,11 +377,11 @@ define i64 @select_pow2_diff_neg_invert(
 
 define i8 @sel_67_neg125(i32 %x) {
 ; CHECK-LABEL: sel_67_neg125:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $42, %edi
 ; CHECK-NEXT:    movb $67, %al
 ; CHECK-NEXT:    jg .LBB31_2
-; CHECK-NEXT:  # BB#1:
+; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movb $-125, %al
 ; CHECK-NEXT:  .LBB31_2:
 ; CHECK-NEXT:    retq
@@ -396,7 +396,7 @@ define i8 @sel_67_neg125(i32 %x) {
 
 define i32 @select_C1_C2(i1 %cond) {
 ; CHECK-LABEL: select_C1_C2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    movl $421, %ecx # imm = 0x1A5
 ; CHECK-NEXT:    movl $42, %eax
@@ -408,7 +408,7 @@ define i32 @select_C1_C2(i1 %cond) {
 
 define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_C1_C2_zeroext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    movl $421, %ecx # imm = 0x1A5
 ; CHECK-NEXT:    movl $42, %eax
@@ -420,7 +420,7 @@ define i32 @select_C1_C2_zeroext(i1 zero
 
 define i32 @select_C1_C2_signext(i1 signext %cond) {
 ; CHECK-LABEL: select_C1_C2_signext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    movl $421, %ecx # imm = 0x1A5
 ; CHECK-NEXT:    movl $42, %eax
@@ -434,7 +434,7 @@ define i32 @select_C1_C2_signext(i1 sign
 
 define i64 @select_2_or_inc(i64 %x) {
 ; CHECK-LABEL: select_2_or_inc:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    leaq 1(%rdi), %rax
 ; CHECK-NEXT:    cmpq $2, %rdi
 ; CHECK-NEXT:    cmoveq %rdi, %rax
@@ -447,10 +447,10 @@ define i64 @select_2_or_inc(i64 %x) {
 
 define <4 x i32> @sel_constants_add_constant_vec(i1 %cond) {
 ; CHECK-LABEL: sel_constants_add_constant_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    jne .LBB36_1
-; CHECK-NEXT:  # BB#2:
+; CHECK-NEXT:  # %bb.2:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [12,13,14,15]
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB36_1:
@@ -463,10 +463,10 @@ define <4 x i32> @sel_constants_add_cons
 
 define <2 x double> @sel_constants_fmul_constant_vec(i1 %cond) {
 ; CHECK-LABEL: sel_constants_fmul_constant_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    jne .LBB37_1
-; CHECK-NEXT:  # BB#2:
+; CHECK-NEXT:  # %bb.2:
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [1.188300e+02,3.454000e+01]
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB37_1:
@@ -482,7 +482,7 @@ define <2 x double> @sel_constants_fmul_
 
 define i64 @opaque_constant(i1 %cond, i64 %x) {
 ; CHECK-LABEL: opaque_constant:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    movl $23, %ecx
 ; CHECK-NEXT:    movq $-4, %rax

Modified: llvm/trunk/test/CodeGen/X86/setcc-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc-combine.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test_eq_1(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_eq_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm0
@@ -20,7 +20,7 @@ define i32 @test_eq_1(<4 x i32> %A, <4 x
 
 define i32 @test_ne_1(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_ne_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
 ; CHECK-NEXT:    movd %xmm0, %eax
@@ -35,7 +35,7 @@ define i32 @test_ne_1(<4 x i32> %A, <4 x
 
 define i32 @test_le_1(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_le_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    retq
   %cmp = icmp slt <4 x i32> %A, %B
@@ -48,7 +48,7 @@ define i32 @test_le_1(<4 x i32> %A, <4 x
 
 define i32 @test_ge_1(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_ge_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm0
@@ -65,7 +65,7 @@ define i32 @test_ge_1(<4 x i32> %A, <4 x
 
 define i32 @test_lt_1(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_lt_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
 ; CHECK-NEXT:    movd %xmm0, %eax
@@ -80,7 +80,7 @@ define i32 @test_lt_1(<4 x i32> %A, <4 x
 
 define i32 @test_gt_1(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_gt_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    retq
   %cmp = icmp slt <4 x i32> %A, %B
@@ -93,7 +93,7 @@ define i32 @test_gt_1(<4 x i32> %A, <4 x
 
 define i32 @test_eq_2(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_eq_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    pxor %xmm0, %xmm1
@@ -110,7 +110,7 @@ define i32 @test_eq_2(<4 x i32> %A, <4 x
 
 define i32 @test_ne_2(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_ne_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; CHECK-NEXT:    movd %xmm0, %eax
@@ -125,7 +125,7 @@ define i32 @test_ne_2(<4 x i32> %A, <4 x
 
 define i32 @test_le_2(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_le_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    pxor %xmm0, %xmm1
@@ -142,7 +142,7 @@ define i32 @test_le_2(<4 x i32> %A, <4 x
 
 define i32 @test_ge_2(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_ge_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    retq
   %cmp = icmp slt <4 x i32> %B, %A
@@ -155,7 +155,7 @@ define i32 @test_ge_2(<4 x i32> %A, <4 x
 
 define i32 @test_lt_2(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_lt_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; CHECK-NEXT:    movd %xmm0, %eax
@@ -170,7 +170,7 @@ define i32 @test_lt_2(<4 x i32> %A, <4 x
 
 define i32 @test_gt_2(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-LABEL: test_gt_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; CHECK-NEXT:    movd %xmm0, %eax

Modified: llvm/trunk/test/CodeGen/X86/setcc-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc-logic.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc-logic.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_bits_clear:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    orl %esi, %edi
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    retq
@@ -15,7 +15,7 @@ define zeroext i1 @all_bits_clear(i32 %P
 
 define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_sign_bits_clear:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    orl %esi, %edi
 ; CHECK-NEXT:    setns %al
 ; CHECK-NEXT:    retq
@@ -27,7 +27,7 @@ define zeroext i1 @all_sign_bits_clear(i
 
 define zeroext i1 @all_bits_set(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_bits_set:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl %esi, %edi
 ; CHECK-NEXT:    cmpl $-1, %edi
 ; CHECK-NEXT:    sete %al
@@ -40,7 +40,7 @@ define zeroext i1 @all_bits_set(i32 %P,
 
 define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_sign_bits_set:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl %esi, %edi
 ; CHECK-NEXT:    shrl $31, %edi
 ; CHECK-NEXT:    movl %edi, %eax
@@ -53,7 +53,7 @@ define zeroext i1 @all_sign_bits_set(i32
 
 define zeroext i1 @any_bits_set(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_bits_set:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    orl %esi, %edi
 ; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    retq
@@ -65,7 +65,7 @@ define zeroext i1 @any_bits_set(i32 %P,
 
 define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_sign_bits_set:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    orl %esi, %edi
 ; CHECK-NEXT:    shrl $31, %edi
 ; CHECK-NEXT:    movl %edi, %eax
@@ -78,7 +78,7 @@ define zeroext i1 @any_sign_bits_set(i32
 
 define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_bits_clear:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl %esi, %edi
 ; CHECK-NEXT:    cmpl $-1, %edi
 ; CHECK-NEXT:    setne %al
@@ -91,7 +91,7 @@ define zeroext i1 @any_bits_clear(i32 %P
 
 define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_sign_bits_clear:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %esi, %edi
 ; CHECK-NEXT:    setns %al
 ; CHECK-NEXT:    retq
@@ -104,10 +104,10 @@ define zeroext i1 @any_sign_bits_clear(i
 ; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
 define i32 @all_bits_clear_branch(i32* %P, i32* %Q) nounwind {
 ; CHECK-LABEL: all_bits_clear_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq %rsi, %rdi
 ; CHECK-NEXT:    jne .LBB8_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $4, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB8_2: # %return
@@ -128,13 +128,13 @@ return:
 
 define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_sign_bits_clear_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    js .LBB9_3
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    testl %esi, %esi
 ; CHECK-NEXT:    js .LBB9_3
-; CHECK-NEXT:  # BB#2: # %bb1
+; CHECK-NEXT:  # %bb.2: # %bb1
 ; CHECK-NEXT:    movl $4, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB9_3: # %return
@@ -155,13 +155,13 @@ return:
 
 define i32 @all_bits_set_branch(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_bits_set_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpl $-1, %edi
 ; CHECK-NEXT:    jne .LBB10_3
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    cmpl $-1, %esi
 ; CHECK-NEXT:    jne .LBB10_3
-; CHECK-NEXT:  # BB#2: # %bb1
+; CHECK-NEXT:  # %bb.2: # %bb1
 ; CHECK-NEXT:    movl $4, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB10_3: # %return
@@ -182,13 +182,13 @@ return:
 
 define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: all_sign_bits_set_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jns .LBB11_3
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    testl %esi, %esi
 ; CHECK-NEXT:    jns .LBB11_3
-; CHECK-NEXT:  # BB#2: # %bb1
+; CHECK-NEXT:  # %bb.2: # %bb1
 ; CHECK-NEXT:    movl $4, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB11_3: # %return
@@ -210,10 +210,10 @@ return:
 ; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
 define i32 @any_bits_set_branch(i32* %P, i32* %Q) nounwind {
 ; CHECK-LABEL: any_bits_set_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq %rsi, %rdi
 ; CHECK-NEXT:    je .LBB12_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    movl $4, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB12_2: # %return
@@ -234,13 +234,13 @@ return:
 
 define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_sign_bits_set_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    js .LBB13_2
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    testl %esi, %esi
 ; CHECK-NEXT:    js .LBB13_2
-; CHECK-NEXT:  # BB#3: # %return
+; CHECK-NEXT:  # %bb.3: # %return
 ; CHECK-NEXT:    movl $192, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB13_2: # %bb1
@@ -261,13 +261,13 @@ return:
 
 define i32 @any_bits_clear_branch(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_bits_clear_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpl $-1, %edi
 ; CHECK-NEXT:    jne .LBB14_2
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    cmpl $-1, %esi
 ; CHECK-NEXT:    jne .LBB14_2
-; CHECK-NEXT:  # BB#3: # %return
+; CHECK-NEXT:  # %bb.3: # %return
 ; CHECK-NEXT:    movl $192, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB14_2: # %bb1
@@ -288,13 +288,13 @@ return:
 
 define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
 ; CHECK-LABEL: any_sign_bits_clear_branch:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jns .LBB15_2
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    testl %esi, %esi
 ; CHECK-NEXT:    jns .LBB15_2
-; CHECK-NEXT:  # BB#3: # %return
+; CHECK-NEXT:  # %bb.3: # %return
 ; CHECK-NEXT:    movl $192, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB15_2: # %bb1
@@ -315,7 +315,7 @@ return:
 
 define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: all_bits_clear_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm0
@@ -328,7 +328,7 @@ define <4 x i1> @all_bits_clear_vec(<4 x
 
 define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: all_sign_bits_clear_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
@@ -341,7 +341,7 @@ define <4 x i1> @all_sign_bits_clear_vec
 
 define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: all_bits_set_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm0
@@ -354,7 +354,7 @@ define <4 x i1> @all_bits_set_vec(<4 x i
 
 define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: all_sign_bits_set_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
@@ -368,7 +368,7 @@ define <4 x i1> @all_sign_bits_set_vec(<
 
 define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: any_bits_set_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm0
@@ -383,7 +383,7 @@ define <4 x i1> @any_bits_set_vec(<4 x i
 
 define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: any_sign_bits_set_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpgtd %xmm0, %xmm1
@@ -397,7 +397,7 @@ define <4 x i1> @any_sign_bits_set_vec(<
 
 define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: any_bits_clear_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm0
@@ -411,7 +411,7 @@ define <4 x i1> @any_bits_clear_vec(<4 x
 
 define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
 ; CHECK-LABEL: any_sign_bits_clear_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    pcmpgtd %xmm1, %xmm0
@@ -424,7 +424,7 @@ define <4 x i1> @any_sign_bits_clear_vec
 
 define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) nounwind {
 ; CHECK-LABEL: ne_neg1_and_ne_zero:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    incq %rdi
 ; CHECK-NEXT:    cmpq $1, %rdi
 ; CHECK-NEXT:    seta %al
@@ -439,7 +439,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i
 
 define zeroext i1 @and_eq(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
 ; CHECK-LABEL: and_eq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %esi, %edi
 ; CHECK-NEXT:    xorl %ecx, %edx
 ; CHECK-NEXT:    orb %dl, %dil
@@ -453,7 +453,7 @@ define zeroext i1 @and_eq(i8 %a, i8 %b,
 
 define zeroext i1 @or_ne(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
 ; CHECK-LABEL: or_ne:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %esi, %edi
 ; CHECK-NEXT:    xorl %ecx, %edx
 ; CHECK-NEXT:    orb %dl, %dil
@@ -469,7 +469,7 @@ define zeroext i1 @or_ne(i8 %a, i8 %b, i
 
 define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
 ; CHECK-LABEL: and_eq_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-NEXT:    pcmpeqd %xmm3, %xmm2
 ; CHECK-NEXT:    pand %xmm2, %xmm0
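
The hunks above, like the rest of this patch, are purely mechanical: every
FileCheck expectation of the form "# BB#N:" (or "## BB#N:" on Darwin
targets, where the assembly comment string is "##") becomes "# %bb.N:" or
"## %bb.N:" to match the unified MBB reference format. As an illustration
only, not the update tool actually used for this commit, the whole rewrite
reduces to a one-line substitution applied to each test file:

import re
import sys

# Hedged sketch: rewrite old-style "BB#N" block references in FileCheck
# lines to the new "%bb.N" spelling, preserving the "#" or "##" comment
# prefix, e.g. "# BB#2: # %middle.block" -> "# %bb.2: # %middle.block".
BB_REF = re.compile(r"(#+) BB#(\d+)")

def update_line(line: str) -> str:
    return BB_REF.sub(lambda m: f"{m.group(1)} %bb.{m.group(2)}", line)

if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path) as f:
            updated = [update_line(line) for line in f]
        with open(path, "w") as f:
            f.writelines(updated)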

Modified: llvm/trunk/test/CodeGen/X86/setcc-lowering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc-lowering.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc-lowering.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc-lowering.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define <8 x i16> @pr25080(<8 x i32> %a) {
 ; AVX-LABEL: pr25080:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
@@ -22,7 +22,7 @@ define <8 x i16> @pr25080(<8 x i32> %a)
 ; AVX-NEXT:    retq
 ;
 ; KNL-32-LABEL: pr25080:
-; KNL-32:       # BB#0: # %entry
+; KNL-32:       # %bb.0: # %entry
 ; KNL-32-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL-32-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [8388607,8388607,8388607,8388607,8388607,8388607,8388607,8388607]
 ; KNL-32-NEXT:    vptestnmd %zmm1, %zmm0, %k0
@@ -42,7 +42,7 @@ entry:
 
 define void @pr26232(i64 %a, <16 x i1> %b) {
 ; AVX-LABEL: pr26232:
-; AVX:       # BB#0: # %for_loop599.preheader
+; AVX:       # %bb.0: # %for_loop599.preheader
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX-NEXT:    .p2align 4, 0x90
@@ -60,11 +60,11 @@ define void @pr26232(i64 %a, <16 x i1> %
 ; AVX-NEXT:    vpmovmskb %xmm3, %eax
 ; AVX-NEXT:    testw %ax, %ax
 ; AVX-NEXT:    jne .LBB1_1
-; AVX-NEXT:  # BB#2: # %for_exit600
+; AVX-NEXT:  # %bb.2: # %for_exit600
 ; AVX-NEXT:    retq
 ;
 ; KNL-32-LABEL: pr26232:
-; KNL-32:       # BB#0: # %for_loop599.preheader
+; KNL-32:       # %bb.0: # %for_loop599.preheader
 ; KNL-32-NEXT:    pushl %esi
 ; KNL-32-NEXT:    .cfi_def_cfa_offset 8
 ; KNL-32-NEXT:    .cfi_offset %esi, -8
@@ -87,7 +87,7 @@ define void @pr26232(i64 %a, <16 x i1> %
 ; KNL-32-NEXT:    kmovw %k1, %esi
 ; KNL-32-NEXT:    testw %si, %si
 ; KNL-32-NEXT:    jne .LBB1_1
-; KNL-32-NEXT:  # BB#2: # %for_exit600
+; KNL-32-NEXT:  # %bb.2: # %for_exit600
 ; KNL-32-NEXT:    popl %esi
 ; KNL-32-NEXT:    retl
 allocas:

Modified: llvm/trunk/test/CodeGen/X86/setcc-narrowing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc-narrowing.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc-narrowing.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc-narrowing.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @t1() nounwind ssp {
 ; CHECK-LABEL: t1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $0, _t1.global
 ; CHECK-NEXT:    setne %al

Modified: llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc-wide-types.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @ne_i128(<2 x i64> %x, <2 x i64> %y) {
 ; SSE2-LABEL: ne_i128:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pcmpeqb %xmm1, %xmm0
 ; SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; SSE2-NEXT:    xorl %eax, %eax
@@ -15,7 +15,7 @@ define i32 @ne_i128(<2 x i64> %x, <2 x i
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: ne_i128:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX2-NEXT:    xorl %eax, %eax
@@ -31,7 +31,7 @@ define i32 @ne_i128(<2 x i64> %x, <2 x i
 
 define i32 @eq_i128(<2 x i64> %x, <2 x i64> %y) {
 ; SSE2-LABEL: eq_i128:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pcmpeqb %xmm1, %xmm0
 ; SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; SSE2-NEXT:    xorl %eax, %eax
@@ -40,7 +40,7 @@ define i32 @eq_i128(<2 x i64> %x, <2 x i
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: eq_i128:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX2-NEXT:    xorl %eax, %eax
@@ -56,7 +56,7 @@ define i32 @eq_i128(<2 x i64> %x, <2 x i
 
 define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
 ; SSE2-LABEL: ne_i256:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm4, %rax
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -81,7 +81,7 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: ne_i256:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %ecx
 ; AVX2-NEXT:    xorl %eax, %eax
@@ -98,7 +98,7 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i
 
 define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) {
 ; SSE2-LABEL: eq_i256:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm4, %rax
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -123,7 +123,7 @@ define i32 @eq_i256(<4 x i64> %x, <4 x i
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: eq_i256:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %ecx
 ; AVX2-NEXT:    xorl %eax, %eax
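
For reference, the ne_i128/eq_i128 lowerings checked above implement the
wide compare as a byte-wise vector compare plus a mask extraction. A
minimal model of the idiom follows; the all-equal mask value 0xFFFF for
16 byte lanes is my reading of the pattern, since the compare against the
mask itself falls outside the quoted hunks:

# Hedged model of the pcmpeqb + pmovmskb idiom in ne_i128/eq_i128:
# one mask bit per byte lane, so 16 equal lanes produce the mask 0xFFFF.
def eq_i128(x: bytes, y: bytes) -> bool:
    assert len(x) == len(y) == 16
    mask = 0
    for i, (a, b) in enumerate(zip(x, y)):
        mask |= (a == b) << i  # pcmpeqb lane -> pmovmskb bit
    return mask == 0xFFFF      # the sete/setne into %al tests this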

Modified: llvm/trunk/test/CodeGen/X86/setcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
 ; CHECK-LABEL: t1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $26, %edi
 ; CHECK-NEXT:    seta %al
@@ -20,7 +20,7 @@ define zeroext i16 @t1(i16 zeroext %x) n
 
 define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
 ; CHECK-LABEL: t2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl $26, %edi
 ; CHECK-NEXT:    setb %al
@@ -33,7 +33,7 @@ define zeroext i16 @t2(i16 zeroext %x) n
 
 define i64 @t3(i64 %x) nounwind readnone ssp {
 ; CHECK-LABEL: t3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpq $18, %rdi
 ; CHECK-NEXT:    setb %al
@@ -48,7 +48,7 @@ define i64 @t3(i64 %x) nounwind readnone
 
 define i32 @t4(i32 %a) {
 ; CHECK-LABEL: t4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq _v4@{{.*}}(%rip), %rax
 ; CHECK-NEXT:    cmpl $1, (%rax)
 ; CHECK-NEXT:    movw $1, %ax
@@ -67,7 +67,7 @@ define i32 @t4(i32 %a) {
 
 define i8 @t5(i32 %a) #0 {
 ; CHECK-LABEL: t5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    setns %al
 ; CHECK-NEXT:    retq
@@ -79,7 +79,7 @@ define i8 @t5(i32 %a) #0 {
 
 define zeroext i1 @t6(i32 %a) #0 {
 ; CHECK-LABEL: t6:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    setns %al
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/sext-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sext-i1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sext-i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sext-i1.ll Mon Dec  4 09:18:51 2017
@@ -7,13 +7,13 @@
 
 define i32 @t1(i32 %x) nounwind readnone ssp {
 ; X32-LABEL: t1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpl $1, {{[0-9]+}}(%esp)
 ; X32-NEXT:    sbbl %eax, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $1, %edi
 ; X64-NEXT:    sbbl %eax, %eax
 ; X64-NEXT:    retq
@@ -24,13 +24,13 @@ define i32 @t1(i32 %x) nounwind readnone
 
 define i32 @t2(i32 %x) nounwind readnone ssp {
 ; X32-LABEL: t2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpl $1, {{[0-9]+}}(%esp)
 ; X32-NEXT:    sbbl %eax, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $1, %edi
 ; X64-NEXT:    sbbl %eax, %eax
 ; X64-NEXT:    retq
@@ -41,7 +41,7 @@ define i32 @t2(i32 %x) nounwind readnone
 
 define i32 @t3() nounwind readonly {
 ; X32-LABEL: t3:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    cmpl $1, %eax
 ; X32-NEXT:    sbbl %eax, %eax
 ; X32-NEXT:    cmpl %eax, %eax
@@ -50,7 +50,7 @@ define i32 @t3() nounwind readonly {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    testl %eax, %eax
 ; X64-NEXT:    sete %al
@@ -76,7 +76,7 @@ if.end:
 
 define i32 @t4(i64 %x) nounwind readnone ssp {
 ; X32-LABEL: t4:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
@@ -85,7 +85,7 @@ define i32 @t4(i64 %x) nounwind readnone
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpq $1, %rdi
 ; X64-NEXT:    sbbl %eax, %eax
 ; X64-NEXT:    retq
@@ -96,14 +96,14 @@ define i32 @t4(i64 %x) nounwind readnone
 
 define i64 @t5(i32 %x) nounwind readnone ssp {
 ; X32-LABEL: t5:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpl $1, {{[0-9]+}}(%esp)
 ; X32-NEXT:    sbbl %eax, %eax
 ; X32-NEXT:    movl %eax, %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $1, %edi
 ; X64-NEXT:    sbbq %rax, %rax
 ; X64-NEXT:    retq
@@ -116,14 +116,14 @@ define i64 @t5(i32 %x) nounwind readnone
 
 define i32 @select_0_or_1s(i1 %cond) {
 ; X32-LABEL: select_0_or_1s:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
 ; X32-NEXT:    decl %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: select_0_or_1s:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    andl $1, %edi
 ; X64-NEXT:    leal -1(%rdi), %eax
@@ -137,13 +137,13 @@ define i32 @select_0_or_1s(i1 %cond) {
 
 define i32 @select_0_or_1s_zeroext(i1 zeroext %cond) {
 ; X32-LABEL: select_0_or_1s_zeroext:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    decl %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: select_0_or_1s_zeroext:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    leal -1(%rdi), %eax
 ; X64-NEXT:    retq
@@ -156,7 +156,7 @@ define i32 @select_0_or_1s_zeroext(i1 ze
 
 define i32 @select_0_or_1s_signext(i1 signext %cond) {
 ; X32-LABEL: select_0_or_1s_signext:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -164,7 +164,7 @@ define i32 @select_0_or_1s_signext(i1 si
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: select_0_or_1s_signext:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    notl %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
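
The "cmpl $1" plus "sbbl %eax, %eax" pairs pinned throughout sext-i1.ll
are the branchless sign-extension idiom: the compare sets the carry flag
exactly when the unsigned input is below 1, i.e. equal to zero, and
subtract-with-borrow of a register from itself then yields 0 or -1. A
small arithmetic model (the function name is mine; the flag semantics
are standard x86):

# Hedged model of "cmpl $1, x ; sbbl %eax, %eax" from the t1/t2 checks:
# CF = (x <u 1) = (x == 0); eax - eax - CF = -CF, i.e. 0 or 0xFFFFFFFF.
def sext_x_eq_0(x: int) -> int:
    carry = 1 if (x & 0xFFFFFFFF) < 1 else 0  # cmpl $1, x sets CF
    return (-carry) & 0xFFFFFFFF              # sbbl %eax, %eax

assert sext_x_eq_0(0) == 0xFFFFFFFF  # sext(i1 true)
assert sext_x_eq_0(7) == 0           # sext(i1 false)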

Modified: llvm/trunk/test/CodeGen/X86/sext-setcc-self.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sext-setcc-self.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sext-setcc-self.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sext-setcc-self.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x i32> @test_ueq(<4 x float> %in) {
 ; CHECK-LABEL: test_ueq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t0 = fcmp ueq <4 x float> %in, %in
@@ -13,7 +13,7 @@ define <4 x i32> @test_ueq(<4 x float> %
 
 define <4 x i32> @test_uge(<4 x float> %in) {
 ; CHECK-LABEL: test_uge:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t0 = fcmp uge <4 x float> %in, %in
@@ -23,7 +23,7 @@ define <4 x i32> @test_uge(<4 x float> %
 
 define <4 x i32> @test_ule(<4 x float> %in) {
 ; CHECK-LABEL: test_ule:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t0 = fcmp ule <4 x float> %in, %in
@@ -33,7 +33,7 @@ define <4 x i32> @test_ule(<4 x float> %
 
 define <4 x i32> @test_one(<4 x float> %in) {
 ; CHECK-LABEL: test_one:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t0 = fcmp one <4 x float> %in, %in
@@ -43,7 +43,7 @@ define <4 x i32> @test_one(<4 x float> %
 
 define <4 x i32> @test_ogt(<4 x float> %in) {
 ; CHECK-LABEL: test_ogt:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t0 = fcmp ogt <4 x float> %in, %in
@@ -53,7 +53,7 @@ define <4 x i32> @test_ogt(<4 x float> %
 
 define <4 x i32> @test_olt(<4 x float> %in) {
 ; CHECK-LABEL: test_olt:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t0 = fcmp olt <4 x float> %in, %in

Modified: llvm/trunk/test/CodeGen/X86/sha-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sha-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sha-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sha-schedule.ll Mon Dec  4 09:18:51 2017
@@ -10,25 +10,25 @@
 
 define <4 x i32> @test_sha1msg1(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_sha1msg1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sha1msg1 %xmm1, %xmm0
 ; GENERIC-NEXT:    sha1msg1 (%rdi), %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha1msg1:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    sha1msg1 %xmm1, %xmm0
 ; GOLDMONT-NEXT:    sha1msg1 (%rdi), %xmm0
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha1msg1:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    sha1msg1 %xmm1, %xmm0
 ; CANNONLAKE-NEXT:    sha1msg1 (%rdi), %xmm0
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha1msg1:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sha1msg1 %xmm1, %xmm0 # sched: [2:1.00]
 ; ZNVER1-NEXT:    sha1msg1 (%rdi), %xmm0 # sched: [9:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -41,25 +41,25 @@ declare <4 x i32> @llvm.x86.sha1msg1(<4
 
 define <4 x i32> @test_sha1msg2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_sha1msg2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sha1msg2 %xmm1, %xmm0
 ; GENERIC-NEXT:    sha1msg2 (%rdi), %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha1msg2:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    sha1msg2 %xmm1, %xmm0
 ; GOLDMONT-NEXT:    sha1msg2 (%rdi), %xmm0
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha1msg2:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    sha1msg2 %xmm1, %xmm0
 ; CANNONLAKE-NEXT:    sha1msg2 (%rdi), %xmm0
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha1msg2:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sha1msg2 %xmm1, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    sha1msg2 (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -72,25 +72,25 @@ declare <4 x i32> @llvm.x86.sha1msg2(<4
 
 define <4 x i32> @test_sha1nexte(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_sha1nexte:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sha1nexte %xmm1, %xmm0
 ; GENERIC-NEXT:    sha1nexte (%rdi), %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha1nexte:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    sha1nexte %xmm1, %xmm0
 ; GOLDMONT-NEXT:    sha1nexte (%rdi), %xmm0
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha1nexte:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    sha1nexte %xmm1, %xmm0
 ; CANNONLAKE-NEXT:    sha1nexte (%rdi), %xmm0
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha1nexte:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sha1nexte %xmm1, %xmm0 # sched: [1:1.00]
 ; ZNVER1-NEXT:    sha1nexte (%rdi), %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -103,25 +103,25 @@ declare <4 x i32> @llvm.x86.sha1nexte(<4
 
 define <4 x i32> @test_sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_sha1rnds4:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sha1rnds4 $3, %xmm1, %xmm0
 ; GENERIC-NEXT:    sha1rnds4 $3, (%rdi), %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha1rnds4:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    sha1rnds4 $3, %xmm1, %xmm0
 ; GOLDMONT-NEXT:    sha1rnds4 $3, (%rdi), %xmm0
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha1rnds4:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    sha1rnds4 $3, %xmm1, %xmm0
 ; CANNONLAKE-NEXT:    sha1rnds4 $3, (%rdi), %xmm0
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha1rnds4:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sha1rnds4 $3, %xmm1, %xmm0 # sched: [6:1.00]
 ; ZNVER1-NEXT:    sha1rnds4 $3, (%rdi), %xmm0 # sched: [13:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -138,25 +138,25 @@ declare <4 x i32> @llvm.x86.sha1rnds4(<4
 
 define <4 x i32> @test_sha256msg1(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_sha256msg1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sha256msg1 %xmm1, %xmm0
 ; GENERIC-NEXT:    sha256msg1 (%rdi), %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha256msg1:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    sha256msg1 %xmm1, %xmm0
 ; GOLDMONT-NEXT:    sha256msg1 (%rdi), %xmm0
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha256msg1:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    sha256msg1 %xmm1, %xmm0
 ; CANNONLAKE-NEXT:    sha256msg1 (%rdi), %xmm0
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha256msg1:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sha256msg1 %xmm1, %xmm0 # sched: [2:1.00]
 ; ZNVER1-NEXT:    sha256msg1 (%rdi), %xmm0 # sched: [9:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -169,25 +169,25 @@ declare <4 x i32> @llvm.x86.sha256msg1(<
 
 define <4 x i32> @test_sha256msg2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_sha256msg2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sha256msg2 %xmm1, %xmm0
 ; GENERIC-NEXT:    sha256msg2 (%rdi), %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha256msg2:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    sha256msg2 %xmm1, %xmm0
 ; GOLDMONT-NEXT:    sha256msg2 (%rdi), %xmm0
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha256msg2:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    sha256msg2 %xmm1, %xmm0
 ; CANNONLAKE-NEXT:    sha256msg2 (%rdi), %xmm0
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha256msg2:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sha256msg2 %xmm1, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    sha256msg2 (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -200,7 +200,7 @@ declare <4 x i32> @llvm.x86.sha256msg2(<
 
 define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> *%a3) {
 ; GENERIC-LABEL: test_sha256rnds2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movaps %xmm0, %xmm3 # sched: [1:1.00]
 ; GENERIC-NEXT:    movaps %xmm2, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    sha256rnds2 %xmm0, %xmm1, %xmm3
@@ -209,7 +209,7 @@ define <4 x i32> @test_sha256rnds2(<4 x
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; GOLDMONT-LABEL: test_sha256rnds2:
-; GOLDMONT:       # BB#0:
+; GOLDMONT:       # %bb.0:
 ; GOLDMONT-NEXT:    movaps %xmm0, %xmm3 # sched: [1:1.00]
 ; GOLDMONT-NEXT:    movaps %xmm2, %xmm0 # sched: [1:1.00]
 ; GOLDMONT-NEXT:    sha256rnds2 %xmm0, %xmm1, %xmm3
@@ -218,7 +218,7 @@ define <4 x i32> @test_sha256rnds2(<4 x
 ; GOLDMONT-NEXT:    retq # sched: [4:1.00]
 ;
 ; CANNONLAKE-LABEL: test_sha256rnds2:
-; CANNONLAKE:       # BB#0:
+; CANNONLAKE:       # %bb.0:
 ; CANNONLAKE-NEXT:    vmovaps %xmm0, %xmm3 # sched: [1:1.00]
 ; CANNONLAKE-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; CANNONLAKE-NEXT:    sha256rnds2 %xmm0, %xmm1, %xmm3
@@ -227,7 +227,7 @@ define <4 x i32> @test_sha256rnds2(<4 x
 ; CANNONLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_sha256rnds2:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovaps %xmm0, %xmm3 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00]
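
Besides the block labels, the sha-schedule.ll expectations pin per-CPU
scheduling comments of the form "# sched: [latency:reciprocal-throughput]",
e.g. "[7:1.00]" on CANNONLAKE, and "[100:?]" where the ZNVER1 model reports
no throughput. A small parser sketch, reading the two fields exactly as
they appear in the CHECK lines above:

import re

# Hedged sketch: extract (latency, reciprocal throughput) pairs from the
# "# sched: [lat:rthru]" comments above; "?" is treated as unknown.
SCHED = re.compile(r"# sched: \[(\d+):(\d+\.\d+|\?)\]")

def parse_sched(check_line: str):
    m = SCHED.search(check_line)
    if m is None:
        return None
    latency = int(m.group(1))
    rthroughput = None if m.group(2) == "?" else float(m.group(2))
    return latency, rthroughput

assert parse_sched("; ZNVER1-NEXT:    retq # sched: [1:0.50]") == (1, 0.5)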

Modified: llvm/trunk/test/CodeGen/X86/shift-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-and.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-and.ll Mon Dec  4 09:18:51 2017
@@ -4,14 +4,14 @@
 
 define i32 @t1(i32 %t, i32 %val) nounwind {
 ; X32-LABEL: t1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shll %cl, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    shll %cl, %esi
 ; X64-NEXT:    movl %esi, %eax
@@ -23,14 +23,14 @@ define i32 @t1(i32 %t, i32 %val) nounwin
 
 define i32 @t2(i32 %t, i32 %val) nounwind {
 ; X32-LABEL: t2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shll %cl, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    shll %cl, %esi
 ; X64-NEXT:    movl %esi, %eax
@@ -44,13 +44,13 @@ define i32 @t2(i32 %t, i32 %val) nounwin
 
 define void @t3(i16 %t) nounwind {
 ; X32-LABEL: t3:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    sarw %cl, X
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t3:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    sarw %cl, {{.*}}(%rip)
 ; X64-NEXT:    retq
@@ -63,7 +63,7 @@ define void @t3(i16 %t) nounwind {
 
 define i64 @t4(i64 %t, i64 %val) nounwind {
 ; X32-LABEL: t4:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -73,7 +73,7 @@ define i64 @t4(i64 %t, i64 %val) nounwin
 ; X32-NEXT:    shrdl %cl, %esi, %eax
 ; X32-NEXT:    testb $32, %cl
 ; X32-NEXT:    je .LBB3_2
-; X32-NEXT:  # BB#1:
+; X32-NEXT:  # %bb.1:
 ; X32-NEXT:    movl %edx, %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:  .LBB3_2:
@@ -81,7 +81,7 @@ define i64 @t4(i64 %t, i64 %val) nounwin
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    shrq %cl, %rsi
 ; X64-NEXT:    movq %rsi, %rax
@@ -93,7 +93,7 @@ define i64 @t4(i64 %t, i64 %val) nounwin
 
 define i64 @t5(i64 %t, i64 %val) nounwind {
 ; X32-LABEL: t5:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -103,7 +103,7 @@ define i64 @t5(i64 %t, i64 %val) nounwin
 ; X32-NEXT:    shrdl %cl, %esi, %eax
 ; X32-NEXT:    testb $32, %cl
 ; X32-NEXT:    je .LBB4_2
-; X32-NEXT:  # BB#1:
+; X32-NEXT:  # %bb.1:
 ; X32-NEXT:    movl %edx, %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:  .LBB4_2:
@@ -111,7 +111,7 @@ define i64 @t5(i64 %t, i64 %val) nounwin
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    shrq %cl, %rsi
 ; X64-NEXT:    movq %rsi, %rax
@@ -123,7 +123,7 @@ define i64 @t5(i64 %t, i64 %val) nounwin
 
 define void @t5ptr(i64 %t, i64* %ptr) nounwind {
 ; X32-LABEL: t5ptr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
@@ -135,7 +135,7 @@ define void @t5ptr(i64 %t, i64* %ptr) no
 ; X32-NEXT:    shrdl %cl, %edi, %edx
 ; X32-NEXT:    testb $32, %cl
 ; X32-NEXT:    je .LBB5_2
-; X32-NEXT:  # BB#1:
+; X32-NEXT:  # %bb.1:
 ; X32-NEXT:    movl %esi, %edx
 ; X32-NEXT:    xorl %esi, %esi
 ; X32-NEXT:  .LBB5_2:
@@ -146,7 +146,7 @@ define void @t5ptr(i64 %t, i64* %ptr) no
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t5ptr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    shrq %cl, (%rsi)
 ; X64-NEXT:    retq
@@ -161,7 +161,7 @@ define void @t5ptr(i64 %t, i64* %ptr) no
 ; rdar://11866926
 define i64 @t6(i64 %key, i64* nocapture %val) nounwind {
 ; X32-LABEL: t6:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -181,7 +181,7 @@ define i64 @t6(i64 %key, i64* nocapture
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: t6:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shrq $3, %rdi
 ; X64-NEXT:    movq (%rsi), %rax
 ; X64-NEXT:    decq %rax
@@ -196,7 +196,7 @@ define i64 @t6(i64 %key, i64* nocapture
 
 define i64 @big_mask_constant(i64 %x) nounwind {
 ; X32-LABEL: big_mask_constant:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $4, %eax
 ; X32-NEXT:    shll $25, %eax
@@ -204,7 +204,7 @@ define i64 @big_mask_constant(i64 %x) no
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: big_mask_constant:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shrq $7, %rdi
 ; X64-NEXT:    andl $134217728, %edi # imm = 0x8000000
 ; X64-NEXT:    movq %rdi, %rax

Modified: llvm/trunk/test/CodeGen/X86/shift-bmi2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-bmi2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-bmi2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-bmi2.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define i32 @shl32(i32 %x, i32 %shamt) nounwind uwtable readnone {
 ; BMI2-LABEL: shl32:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: shl32:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlxl %esi, %edi, %eax
 ; BMI264-NEXT:    retq
   %shl = shl i32 %x, %shamt
@@ -19,13 +19,13 @@ define i32 @shl32(i32 %x, i32 %shamt) no
 
 define i32 @shl32i(i32 %x) nounwind uwtable readnone {
 ; BMI2-LABEL: shl32i:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    shll $5, %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: shl32i:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shll $5, %edi
 ; BMI264-NEXT:    movl %edi, %eax
 ; BMI264-NEXT:    retq
@@ -35,14 +35,14 @@ define i32 @shl32i(i32 %x) nounwind uwta
 
 define i32 @shl32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
 ; BMI2-LABEL: shl32p:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; BMI2-NEXT:    shlxl %ecx, (%eax), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: shl32p:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlxl %esi, (%rdi), %eax
 ; BMI264-NEXT:    retq
   %x = load i32, i32* %p
@@ -52,14 +52,14 @@ define i32 @shl32p(i32* %p, i32 %shamt)
 
 define i32 @shl32pi(i32* %p) nounwind uwtable readnone {
 ; BMI2-LABEL: shl32pi:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    movl (%eax), %eax
 ; BMI2-NEXT:    shll $5, %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: shl32pi:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    movl (%rdi), %eax
 ; BMI264-NEXT:    shll $5, %eax
 ; BMI264-NEXT:    retq
@@ -70,7 +70,7 @@ define i32 @shl32pi(i32* %p) nounwind uw
 
 define i64 @shl64(i64 %x, i64 %shamt) nounwind uwtable readnone {
 ; BMI264-LABEL: shl64:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlxq %rsi, %rdi, %rax
 ; BMI264-NEXT:    retq
   %shl = shl i64 %x, %shamt
@@ -79,7 +79,7 @@ define i64 @shl64(i64 %x, i64 %shamt) no
 
 define i64 @shl64i(i64 %x) nounwind uwtable readnone {
 ; BMI264-LABEL: shl64i:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlq $7, %rdi
 ; BMI264-NEXT:    movq %rdi, %rax
 ; BMI264-NEXT:    retq
@@ -89,7 +89,7 @@ define i64 @shl64i(i64 %x) nounwind uwta
 
 define i64 @shl64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
 ; BMI264-LABEL: shl64p:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlxq %rsi, (%rdi), %rax
 ; BMI264-NEXT:    retq
   %x = load i64, i64* %p
@@ -99,7 +99,7 @@ define i64 @shl64p(i64* %p, i64 %shamt)
 
 define i64 @shl64pi(i64* %p) nounwind uwtable readnone {
 ; BMI264-LABEL: shl64pi:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    movq (%rdi), %rax
 ; BMI264-NEXT:    shlq $7, %rax
 ; BMI264-NEXT:    retq
@@ -110,13 +110,13 @@ define i64 @shl64pi(i64* %p) nounwind uw
 
 define i32 @lshr32(i32 %x, i32 %shamt) nounwind uwtable readnone {
 ; BMI2-LABEL: lshr32:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; BMI2-NEXT:    shrxl %eax, {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: lshr32:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shrxl %esi, %edi, %eax
 ; BMI264-NEXT:    retq
   %shl = lshr i32 %x, %shamt
@@ -125,14 +125,14 @@ define i32 @lshr32(i32 %x, i32 %shamt) n
 
 define i32 @lshr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
 ; BMI2-LABEL: lshr32p:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; BMI2-NEXT:    shrxl %ecx, (%eax), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: lshr32p:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shrxl %esi, (%rdi), %eax
 ; BMI264-NEXT:    retq
   %x = load i32, i32* %p
@@ -142,7 +142,7 @@ define i32 @lshr32p(i32* %p, i32 %shamt)
 
 define i64 @lshr64(i64 %x, i64 %shamt) nounwind uwtable readnone {
 ; BMI264-LABEL: lshr64:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shrxq %rsi, %rdi, %rax
 ; BMI264-NEXT:    retq
   %shl = lshr i64 %x, %shamt
@@ -151,7 +151,7 @@ define i64 @lshr64(i64 %x, i64 %shamt) n
 
 define i64 @lshr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
 ; BMI264-LABEL: lshr64p:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shrxq %rsi, (%rdi), %rax
 ; BMI264-NEXT:    retq
   %x = load i64, i64* %p
@@ -161,13 +161,13 @@ define i64 @lshr64p(i64* %p, i64 %shamt)
 
 define i32 @ashr32(i32 %x, i32 %shamt) nounwind uwtable readnone {
 ; BMI2-LABEL: ashr32:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; BMI2-NEXT:    sarxl %eax, {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: ashr32:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    sarxl %esi, %edi, %eax
 ; BMI264-NEXT:    retq
   %shl = ashr i32 %x, %shamt
@@ -176,14 +176,14 @@ define i32 @ashr32(i32 %x, i32 %shamt) n
 
 define i32 @ashr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
 ; BMI2-LABEL: ashr32p:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; BMI2-NEXT:    sarxl %ecx, (%eax), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: ashr32p:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    sarxl %esi, (%rdi), %eax
 ; BMI264-NEXT:    retq
   %x = load i32, i32* %p
@@ -193,7 +193,7 @@ define i32 @ashr32p(i32* %p, i32 %shamt)
 
 define i64 @ashr64(i64 %x, i64 %shamt) nounwind uwtable readnone {
 ; BMI264-LABEL: ashr64:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    sarxq %rsi, %rdi, %rax
 ; BMI264-NEXT:    retq
   %shl = ashr i64 %x, %shamt
@@ -202,7 +202,7 @@ define i64 @ashr64(i64 %x, i64 %shamt) n
 
 define i64 @ashr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
 ; BMI264-LABEL: ashr64p:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    sarxq %rsi, (%rdi), %rax
 ; BMI264-NEXT:    retq
   %x = load i64, i64* %p
@@ -212,13 +212,13 @@ define i64 @ashr64p(i64* %p, i64 %shamt)
 
 define i32 @shl32and(i32 %t, i32 %val) nounwind {
 ; BMI2-LABEL: shl32and:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: shl32and:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlxl %edi, %esi, %eax
 ; BMI264-NEXT:    retq
   %shamt = and i32 %t, 31
@@ -228,7 +228,7 @@ define i32 @shl32and(i32 %t, i32 %val) n
 
 define i64 @shl64and(i64 %t, i64 %val) nounwind {
 ; BMI264-LABEL: shl64and:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shlxq %rdi, %rsi, %rax
 ; BMI264-NEXT:    retq
   %shamt = and i64 %t, 63
@@ -238,13 +238,13 @@ define i64 @shl64and(i64 %t, i64 %val) n
 
 define i32 @lshr32and(i32 %t, i32 %val) nounwind {
 ; BMI2-LABEL: lshr32and:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; BMI2-NEXT:    shrxl %eax, {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: lshr32and:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shrxl %edi, %esi, %eax
 ; BMI264-NEXT:    retq
   %shamt = and i32 %t, 31
@@ -254,7 +254,7 @@ define i32 @lshr32and(i32 %t, i32 %val)
 
 define i64 @lshr64and(i64 %t, i64 %val) nounwind {
 ; BMI264-LABEL: lshr64and:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    shrxq %rdi, %rsi, %rax
 ; BMI264-NEXT:    retq
   %shamt = and i64 %t, 63
@@ -264,13 +264,13 @@ define i64 @lshr64and(i64 %t, i64 %val)
 
 define i32 @ashr32and(i32 %t, i32 %val) nounwind {
 ; BMI2-LABEL: ashr32and:
-; BMI2:       # BB#0:
+; BMI2:       # %bb.0:
 ; BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; BMI2-NEXT:    sarxl %eax, {{[0-9]+}}(%esp), %eax
 ; BMI2-NEXT:    retl
 ;
 ; BMI264-LABEL: ashr32and:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    sarxl %edi, %esi, %eax
 ; BMI264-NEXT:    retq
   %shamt = and i32 %t, 31
@@ -280,7 +280,7 @@ define i32 @ashr32and(i32 %t, i32 %val)
 
 define i64 @ashr64and(i64 %t, i64 %val) nounwind {
 ; BMI264-LABEL: ashr64and:
-; BMI264:       # BB#0:
+; BMI264:       # %bb.0:
 ; BMI264-NEXT:    sarxq %rdi, %rsi, %rax
 ; BMI264-NEXT:    retq
   %shamt = and i64 %t, 63

Modified: llvm/trunk/test/CodeGen/X86/shift-codegen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-codegen.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-codegen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-codegen.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@ target triple = "i686-apple-darwin8"
 
 define void @fn1() {
 ; CHECK-LABEL: fn1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl Y, %eax
 ; CHECK-NEXT:    shll $3, %eax
 ; CHECK-NEXT:    orl %eax, X
@@ -24,7 +24,7 @@ define void @fn1() {
 
 define i32 @fn2(i32 %X, i32 %Y) {
 ; CHECK-LABEL: fn2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    shll $3, %eax
 ; CHECK-NEXT:    orl {{[0-9]+}}(%esp), %eax

Modified: llvm/trunk/test/CodeGen/X86/shift-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-combine.ll Mon Dec  4 09:18:51 2017
@@ -6,14 +6,14 @@
 
 define i32 @test_lshr_and(i32 %x) {
 ; X32-LABEL: test_lshr_and:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $12, %eax
 ; X32-NEXT:    movl array(%eax), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_lshr_and:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    shrl $2, %edi
 ; X64-NEXT:    andl $3, %edi
@@ -28,7 +28,7 @@ define i32 @test_lshr_and(i32 %x) {
 
 define i32* @test_exact1(i32 %a, i32 %b, i32* %x)  {
 ; X32-LABEL: test_exact1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    subl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    sarl %eax
@@ -36,7 +36,7 @@ define i32* @test_exact1(i32 %a, i32 %b,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_exact1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subl %edi, %esi
 ; X64-NEXT:    sarl $3, %esi
 ; X64-NEXT:    movslq %esi, %rax
@@ -50,7 +50,7 @@ define i32* @test_exact1(i32 %a, i32 %b,
 
 define i32* @test_exact2(i32 %a, i32 %b, i32* %x)  {
 ; X32-LABEL: test_exact2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    subl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    sarl %eax
@@ -58,7 +58,7 @@ define i32* @test_exact2(i32 %a, i32 %b,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_exact2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subl %edi, %esi
 ; X64-NEXT:    sarl $3, %esi
 ; X64-NEXT:    movslq %esi, %rax
@@ -72,14 +72,14 @@ define i32* @test_exact2(i32 %a, i32 %b,
 
 define i32* @test_exact3(i32 %a, i32 %b, i32* %x)  {
 ; X32-LABEL: test_exact3:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    subl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_exact3:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subl %edi, %esi
 ; X64-NEXT:    sarl $2, %esi
 ; X64-NEXT:    movslq %esi, %rax
@@ -93,7 +93,7 @@ define i32* @test_exact3(i32 %a, i32 %b,
 
 define i32* @test_exact4(i32 %a, i32 %b, i32* %x)  {
 ; X32-LABEL: test_exact4:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    subl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shrl %eax
@@ -101,7 +101,7 @@ define i32* @test_exact4(i32 %a, i32 %b,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_exact4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    subl %edi, %esi
 ; X64-NEXT:    shrl $3, %esi
@@ -115,7 +115,7 @@ define i32* @test_exact4(i32 %a, i32 %b,
 
 define i32* @test_exact5(i32 %a, i32 %b, i32* %x)  {
 ; X32-LABEL: test_exact5:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    subl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shrl %eax
@@ -123,7 +123,7 @@ define i32* @test_exact5(i32 %a, i32 %b,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_exact5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    subl %edi, %esi
 ; X64-NEXT:    shrl $3, %esi
@@ -137,14 +137,14 @@ define i32* @test_exact5(i32 %a, i32 %b,
 
 define i32* @test_exact6(i32 %a, i32 %b, i32* %x)  {
 ; X32-LABEL: test_exact6:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    subl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_exact6:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    subl %edi, %esi
 ; X64-NEXT:    leaq (%rsi,%rdx), %rax

Modified: llvm/trunk/test/CodeGen/X86/shift-double-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-double-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-double-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-double-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define i64 @test1(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $63, %edx
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shldq %cl, %rsi, %rdi
@@ -21,7 +21,7 @@ define i64 @test1(i64 %hi, i64 %lo, i64
 
 define i64 @test2(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $63, %edx
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shrdq %cl, %rdi, %rsi
@@ -37,7 +37,7 @@ define i64 @test2(i64 %hi, i64 %lo, i64
 
 define i64 @test3(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shldq %cl, %rsi, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
@@ -51,7 +51,7 @@ define i64 @test3(i64 %hi, i64 %lo, i64
 
 define i64 @test4(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shrdq %cl, %rdi, %rsi
 ; CHECK-NEXT:    movq %rsi, %rax
@@ -65,7 +65,7 @@ define i64 @test4(i64 %hi, i64 %lo, i64
 
 define i64 @test5(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shldq %cl, %rsi, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
@@ -80,7 +80,7 @@ define i64 @test5(i64 %hi, i64 %lo, i64
 
 define i64 @test6(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shrdq %cl, %rsi, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
@@ -95,7 +95,7 @@ define i64 @test6(i64 %hi, i64 %lo, i64
 
 define i64 @test7(i64 %hi, i64 %lo, i64 %bits) nounwind {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shrdq %cl, %rsi, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
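
The test1/test2 checks above verify that the funnel-style double shift
lowers to shldq/shrdq with the count masked to six bits (the explicit
"andl $63, %edx" before the move into %cl). As a model of the shldq and
shrdq data flow (an illustration only, not LLVM code):

# Hedged model of "andl $63, %edx ; shldq %cl, %rsi, %rdi" from test1:
# shift hi left by n, filling the vacated low bits from the top of lo.
MASK64 = (1 << 64) - 1

def shld64(hi: int, lo: int, n: int) -> int:
    n &= 63  # andl $63, %edx
    return ((hi << n) | (lo >> (64 - n))) & MASK64

def shrd64(lo: int, hi: int, n: int) -> int:
    n &= 63  # same masking on the shrdq path (test2)
    return ((lo >> n) | (hi << (64 - n))) & MASK64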

Modified: llvm/trunk/test/CodeGen/X86/shift-double.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-double.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-double.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-double.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @test1(i64 %X, i8 %C) nounwind {
 ; X86-LABEL: test1:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -16,7 +16,7 @@ define i64 @test1(i64 %X, i8 %C) nounwin
 ; X86-NEXT:    shldl %cl, %esi, %edx
 ; X86-NEXT:    testb $32, %cl
 ; X86-NEXT:    je .LBB0_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl %eax, %edx
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:  .LBB0_2:
@@ -24,7 +24,7 @@ define i64 @test1(i64 %X, i8 %C) nounwin
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %esi, %ecx
 ; X64-NEXT:    shlq %cl, %rdi
 ; X64-NEXT:    movq %rdi, %rax
@@ -36,7 +36,7 @@ define i64 @test1(i64 %X, i8 %C) nounwin
 
 define i64 @test2(i64 %X, i8 %C) nounwind {
 ; X86-LABEL: test2:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -46,7 +46,7 @@ define i64 @test2(i64 %X, i8 %C) nounwin
 ; X86-NEXT:    shrdl %cl, %esi, %eax
 ; X86-NEXT:    testb $32, %cl
 ; X86-NEXT:    je .LBB1_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    sarl $31, %esi
 ; X86-NEXT:    movl %edx, %eax
 ; X86-NEXT:    movl %esi, %edx
@@ -55,7 +55,7 @@ define i64 @test2(i64 %X, i8 %C) nounwin
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %esi, %ecx
 ; X64-NEXT:    sarq %cl, %rdi
 ; X64-NEXT:    movq %rdi, %rax
@@ -67,7 +67,7 @@ define i64 @test2(i64 %X, i8 %C) nounwin
 
 define i64 @test3(i64 %X, i8 %C) nounwind {
 ; X86-LABEL: test3:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -77,7 +77,7 @@ define i64 @test3(i64 %X, i8 %C) nounwin
 ; X86-NEXT:    shrdl %cl, %esi, %eax
 ; X86-NEXT:    testb $32, %cl
 ; X86-NEXT:    je .LBB2_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl %edx, %eax
 ; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:  .LBB2_2:
@@ -85,7 +85,7 @@ define i64 @test3(i64 %X, i8 %C) nounwin
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test3:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %esi, %ecx
 ; X64-NEXT:    shrq %cl, %rdi
 ; X64-NEXT:    movq %rdi, %rax
@@ -99,7 +99,7 @@ define i64 @test3(i64 %X, i8 %C) nounwin
 
 define i32 @test4(i32 %A, i32 %B, i8 %C) nounwind {
 ; X86-LABEL: test4:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -107,7 +107,7 @@ define i32 @test4(i32 %A, i32 %B, i8 %C)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shldl %cl, %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
@@ -123,7 +123,7 @@ define i32 @test4(i32 %A, i32 %B, i8 %C)
 
 define i16 @test5(i16 %A, i16 %B, i8 %C) nounwind {
 ; X86-LABEL: test5:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
@@ -131,7 +131,7 @@ define i16 @test5(i16 %A, i16 %B, i8 %C)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shldw %cl, %si, %di
 ; X64-NEXT:    movl %edi, %eax
@@ -149,7 +149,7 @@ define i16 @test5(i16 %A, i16 %B, i8 %C)
 
 define i32 @test6(i32 %A, i32 %B, i8 %C) nounwind {
 ; X86-LABEL: test6:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -157,7 +157,7 @@ define i32 @test6(i32 %A, i32 %B, i8 %C)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test6:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shrdl %cl, %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
@@ -173,7 +173,7 @@ define i32 @test6(i32 %A, i32 %B, i8 %C)
 
 define i16 @test7(i16 %A, i16 %B, i8 %C) nounwind {
 ; X86-LABEL: test7:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
@@ -181,7 +181,7 @@ define i16 @test7(i16 %A, i16 %B, i8 %C)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test7:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shrdw %cl, %si, %di
 ; X64-NEXT:    movl %edi, %eax
@@ -199,7 +199,7 @@ define i16 @test7(i16 %A, i16 %B, i8 %C)
 
 define i64 @test8(i64 %val, i32 %bits) nounwind {
 ; X86-LABEL: test8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -211,7 +211,7 @@ define i64 @test8(i64 %val, i32 %bits) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andb $31, %sil
 ; X64-NEXT:    movl %esi, %ecx
 ; X64-NEXT:    shlq %cl, %rdi
@@ -225,7 +225,7 @@ define i64 @test8(i64 %val, i32 %bits) n
 
 define i64 @test9(i64 %val, i32 %bits) nounwind {
 ; X86-LABEL: test9:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -234,7 +234,7 @@ define i64 @test9(i64 %val, i32 %bits) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test9:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andb $31, %sil
 ; X64-NEXT:    movl %esi, %ecx
 ; X64-NEXT:    sarq %cl, %rdi
@@ -248,7 +248,7 @@ define i64 @test9(i64 %val, i32 %bits) n
 
 define i64 @test10(i64 %val, i32 %bits) nounwind {
 ; X86-LABEL: test10:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -257,7 +257,7 @@ define i64 @test10(i64 %val, i32 %bits)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test10:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andb $31, %sil
 ; X64-NEXT:    movl %esi, %ecx
 ; X64-NEXT:    shrq %cl, %rdi
@@ -273,7 +273,7 @@ define i64 @test10(i64 %val, i32 %bits)
 
 define i32 @test11(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test11:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -283,7 +283,7 @@ define i32 @test11(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test11:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andl $31, %edx
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shldl %cl, %esi, %edi
@@ -299,7 +299,7 @@ define i32 @test11(i32 %hi, i32 %lo, i32
 
 define i32 @test12(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test12:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -309,7 +309,7 @@ define i32 @test12(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test12:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andl $31, %edx
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shrdl %cl, %edi, %esi
@@ -325,7 +325,7 @@ define i32 @test12(i32 %hi, i32 %lo, i32
 
 define i32 @test13(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test13:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -333,7 +333,7 @@ define i32 @test13(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test13:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shldl %cl, %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
@@ -347,7 +347,7 @@ define i32 @test13(i32 %hi, i32 %lo, i32
 
 define i32 @test14(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test14:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -355,7 +355,7 @@ define i32 @test14(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test14:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shrdl %cl, %edi, %esi
 ; X64-NEXT:    movl %esi, %eax
@@ -369,7 +369,7 @@ define i32 @test14(i32 %hi, i32 %lo, i32
 
 define i32 @test15(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test15:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -377,7 +377,7 @@ define i32 @test15(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test15:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shldl %cl, %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
@@ -392,7 +392,7 @@ define i32 @test15(i32 %hi, i32 %lo, i32
 
 define i32 @test16(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -400,7 +400,7 @@ define i32 @test16(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shrdl %cl, %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
@@ -415,7 +415,7 @@ define i32 @test16(i32 %hi, i32 %lo, i32
 
 define i32 @test17(i32 %hi, i32 %lo, i32 %bits) nounwind {
 ; X86-LABEL: test17:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -423,7 +423,7 @@ define i32 @test17(i32 %hi, i32 %lo, i32
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test17:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shrdl %cl, %esi, %edi
 ; X64-NEXT:    movl %edi, %eax

Modified: llvm/trunk/test/CodeGen/X86/shift-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-folding.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-folding.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32* @test1(i32* %P, i32 %X) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    andl $-4, %eax
 ; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
@@ -16,7 +16,7 @@ define i32* @test1(i32* %P, i32 %X) {
 
 define i32* @test2(i32* %P, i32 %X) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    shll $4, %eax
 ; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
@@ -29,7 +29,7 @@ define i32* @test2(i32* %P, i32 %X) {
 
 define i32* @test3(i32* %P, i32 %X) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    andl $-4, %eax
 ; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
@@ -41,7 +41,7 @@ define i32* @test3(i32* %P, i32 %X) {
 
 define fastcc i32 @test4(i32* %d) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl 3(%ecx), %eax
 ; CHECK-NEXT:    retl
   %tmp4 = load i32, i32* %d
@@ -54,7 +54,7 @@ define fastcc i32 @test4(i32* %d) {
 
 define i64 @test5(i16 %i, i32* %arr) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    shrl $11, %eax

Modified: llvm/trunk/test/CodeGen/X86/shift-pcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-pcmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-pcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-pcmp.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define <8 x i16> @foo(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: foo:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: foo:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -23,13 +23,13 @@ define <8 x i16> @foo(<8 x i16> %a, <8 x
 ; Don't fail with an assert due to an undef in the buildvector
 define <8 x i16> @bar(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: bar:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: bar:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/shl-crash-on-legalize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shl-crash-on-legalize.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shl-crash-on-legalize.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shl-crash-on-legalize.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@ target triple = "x86_64-unknown-linux-gn
 ; Function Attrs: norecurse nounwind uwtable
 define i32 @_Z3foov() local_unnamed_addr #0 {
 ; CHECK-LABEL: _Z3foov:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/shrink-compare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shrink-compare.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shrink-compare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shrink-compare.ll Mon Dec  4 09:18:51 2017
@@ -5,10 +5,10 @@ declare void @bar()
 
 define void @test1(i32* nocapture %X) nounwind minsize {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $47, (%rdi)
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %tmp1 = load i32, i32* %X, align 4
@@ -26,10 +26,10 @@ if.end:
 
 define void @test2(i32 %X) nounwind minsize {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $47, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %and = and i32 %X, 255
@@ -46,10 +46,10 @@ if.end:
 
 define void @test3(i32 %X) nounwind minsize {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $-1, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %and = and i32 %X, 255
@@ -67,11 +67,11 @@ if.end:
 ; PR16083
 define i1 @test4(i64 %a, i32 %b) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movb $1, %al
 ; CHECK-NEXT:    testl %esi, %esi
 ; CHECK-NEXT:    je .LBB3_1
-; CHECK-NEXT:  # BB#2: # %lor.end
+; CHECK-NEXT:  # %bb.2: # %lor.end
 ; CHECK-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB3_1: # %lor.rhs
@@ -97,14 +97,14 @@ lor.end:
 ; PR16551
 define void @test5(i32 %X) nounwind minsize {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movzbl x+{{.*}}(%rip), %eax
 ; CHECK-NEXT:    shll $16, %eax
 ; CHECK-NEXT:    movzwl x+{{.*}}(%rip), %ecx
 ; CHECK-NEXT:    orl %eax, %ecx
 ; CHECK-NEXT:    cmpl $1, %ecx
 ; CHECK-NEXT:    jne bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %bf.load = load i56, i56* bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @x to i56*), align 4
@@ -123,11 +123,11 @@ if.end:
 
 define void @test2_1(i32 %X) nounwind minsize {
 ; CHECK-LABEL: test2_1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    cmpl $256, %eax # imm = 0x100
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %and = and i32 %X, 255
@@ -144,10 +144,10 @@ if.end:
 
 define void @test_sext_i8_icmp_1(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $1, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -164,10 +164,10 @@ if.end:
 
 define void @test_sext_i8_icmp_47(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_47:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $47, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -184,10 +184,10 @@ if.end:
 
 define void @test_sext_i8_icmp_127(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_127:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $127, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -204,10 +204,10 @@ if.end:
 
 define void @test_sext_i8_icmp_neg1(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_neg1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $-1, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -224,10 +224,10 @@ if.end:
 
 define void @test_sext_i8_icmp_neg2(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_neg2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $-2, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -244,10 +244,10 @@ if.end:
 
 define void @test_sext_i8_icmp_neg127(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_neg127:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $-127, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -264,10 +264,10 @@ if.end:
 
 define void @test_sext_i8_icmp_neg128(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_neg128:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpb $-128, %dil
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32
@@ -284,11 +284,11 @@ if.end:
 
 define void @test_sext_i8_icmp_255(i8 %x) nounwind minsize {
 ; CHECK-LABEL: test_sext_i8_icmp_255:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movb $1, %al
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je bar # TAILCALL
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    retq
 entry:
   %sext = sext i8 %x to i32

Modified: llvm/trunk/test/CodeGen/X86/shrink_vmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shrink_vmul.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shrink_vmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shrink_vmul.ll Mon Dec  4 09:18:51 2017
@@ -12,7 +12,7 @@
 ;
 define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_2xi8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -34,7 +34,7 @@ define void @mul_2xi8(i8* nocapture read
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rdx), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -72,7 +72,7 @@ entry:
 ;
 define void @mul_4xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_4xi8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -92,7 +92,7 @@ define void @mul_4xi8(i8* nocapture read
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_4xi8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -128,7 +128,7 @@ entry:
 ;
 define void @mul_8xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_8xi8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -151,7 +151,7 @@ define void @mul_8xi8(i8* nocapture read
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_8xi8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
@@ -190,7 +190,7 @@ entry:
 ;
 define void @mul_16xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_16xi8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -223,7 +223,7 @@ define void @mul_16xi8(i8* nocapture rea
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_16xi8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movdqu (%rdi,%rdx), %xmm0
 ; X64-NEXT:    movdqu (%rsi,%rdx), %xmm1
@@ -272,7 +272,7 @@ entry:
 ;
 define void @mul_2xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_2xi16:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -291,7 +291,7 @@ define void @mul_2xi16(i8* nocapture rea
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -326,7 +326,7 @@ entry:
 ;
 define void @mul_4xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_4xi16:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -345,7 +345,7 @@ define void @mul_4xi16(i8* nocapture rea
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_4xi16:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
@@ -380,7 +380,7 @@ entry:
 ;
 define void @mul_8xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_8xi16:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -402,7 +402,7 @@ define void @mul_8xi16(i8* nocapture rea
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_8xi16:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movdqu (%rdi,%rdx), %xmm0
 ; X64-NEXT:    movdqu (%rsi,%rdx), %xmm1
@@ -440,7 +440,7 @@ entry:
 ;
 define void @mul_16xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_16xi16:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -472,7 +472,7 @@ define void @mul_16xi16(i8* nocapture re
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_16xi16:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movdqu (%rdi,%rdx), %xmm0
 ; X64-NEXT:    movdqu 16(%rdi,%rdx), %xmm1
@@ -520,7 +520,7 @@ entry:
 ;
 define void @mul_2xi8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_2xi8_sext:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -544,7 +544,7 @@ define void @mul_2xi8_sext(i8* nocapture
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_sext:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rdx), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -584,7 +584,7 @@ entry:
 ;
 define void @mul_2xi8_sext_zext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_2xi8_sext_zext:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -609,7 +609,7 @@ define void @mul_2xi8_sext_zext(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_sext_zext:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rdx), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -650,7 +650,7 @@ entry:
 ;
 define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_2xi16_sext:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -669,7 +669,7 @@ define void @mul_2xi16_sext(i8* nocaptur
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16_sext:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -704,7 +704,7 @@ entry:
 ;
 define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_2xi16_sext_zext:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -736,7 +736,7 @@ define void @mul_2xi16_sext_zext(i8* noc
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16_sext_zext:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
@@ -784,7 +784,7 @@ entry:
 ;
 define void @mul_16xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) {
 ; X86-LABEL: mul_16xi16_sext:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -816,7 +816,7 @@ define void @mul_16xi16_sext(i8* nocaptu
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_16xi16_sext:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movdqu (%rdi,%rdx), %xmm0
 ; X64-NEXT:    movdqu 16(%rdi,%rdx), %xmm1
@@ -863,7 +863,7 @@ entry:
 ;
 define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi8_varconst1:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -877,7 +877,7 @@ define void @mul_2xi8_varconst1(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_varconst1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rsi), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -907,7 +907,7 @@ entry:
 ;
 define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi8_varconst2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -922,7 +922,7 @@ define void @mul_2xi8_varconst2(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_varconst2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rsi), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -953,7 +953,7 @@ entry:
 ;
 define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi8_varconst3:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -970,7 +970,7 @@ define void @mul_2xi8_varconst3(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_varconst3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rsi), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -1003,7 +1003,7 @@ entry:
 ;
 define void @mul_2xi8_varconst4(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi8_varconst4:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1020,7 +1020,7 @@ define void @mul_2xi8_varconst4(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_varconst4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rsi), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -1053,7 +1053,7 @@ entry:
 ;
 define void @mul_2xi8_varconst5(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi8_varconst5:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1070,7 +1070,7 @@ define void @mul_2xi8_varconst5(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_varconst5:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rsi), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -1103,7 +1103,7 @@ entry:
 ;
 define void @mul_2xi8_varconst6(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi8_varconst6:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1120,7 +1120,7 @@ define void @mul_2xi8_varconst6(i8* noca
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi8_varconst6:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movzwl (%rdi,%rsi), %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
@@ -1153,7 +1153,7 @@ entry:
 ;
 define void @mul_2xi16_varconst1(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi16_varconst1:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1167,7 +1167,7 @@ define void @mul_2xi16_varconst1(i8* noc
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16_varconst1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    movdqa {{.*#+}} xmm1 = <0,65535,u,u,u,u,u,u>
@@ -1197,7 +1197,7 @@ entry:
 ;
 define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi16_varconst2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1211,7 +1211,7 @@ define void @mul_2xi16_varconst2(i8* noc
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16_varconst2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    movdqa {{.*#+}} xmm1 = <32768,32767,u,u,u,u,u,u>
@@ -1241,7 +1241,7 @@ entry:
 ;
 define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi16_varconst3:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1261,7 +1261,7 @@ define void @mul_2xi16_varconst3(i8* noc
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16_varconst3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    pxor %xmm1, %xmm1
@@ -1299,7 +1299,7 @@ entry:
 ;
 define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
 ; X86-LABEL: mul_2xi16_varconst4:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl c, %edx
@@ -1319,7 +1319,7 @@ define void @mul_2xi16_varconst4(i8* noc
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_2xi16_varconst4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq {{.*}}(%rip), %rax
 ; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
@@ -1356,7 +1356,7 @@ entry:
 
 define void @PR34947() {
 ; X86-LABEL: PR34947:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movdqa (%eax), %xmm0
 ; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; X86-NEXT:    movd %xmm1, %ecx
@@ -1403,7 +1403,7 @@ define void @PR34947() {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: PR34947:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movdqa (%rax), %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; X64-NEXT:    movd %xmm1, %ecx

Modified: llvm/trunk/test/CodeGen/X86/shrink_vmul_sse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shrink_vmul_sse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shrink_vmul_sse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shrink_vmul_sse.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 %index) nounwind {
 ; CHECK-LABEL: mul_2xi8:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushl %ebx
 ; CHECK-NEXT:    pushl %edi
 ; CHECK-NEXT:    pushl %esi

Modified: llvm/trunk/test/CodeGen/X86/shuffle-combine-crash-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-combine-crash-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-combine-crash-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-combine-crash-2.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define <4 x i64> @fold_movsd_zero() {
 ; X86-LABEL: fold_movsd_zero:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    xorps %xmm0, %xmm0
 ; X86-NEXT:    xorps %xmm1, %xmm1
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fold_movsd_zero:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    xorps %xmm1, %xmm1
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/shuffle-of-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-of-insert.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-of-insert.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-of-insert.ll Mon Dec  4 09:18:51 2017
@@ -5,20 +5,20 @@
 
 define <4 x i32> @ins_elt_0(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_0:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm0
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_0:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $0, %edi, %xmm0
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $0, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX-NEXT:    retq
@@ -29,7 +29,7 @@ define <4 x i32> @ins_elt_0(i32 %x, <4 x
 
 define <4 x i32> @ins_elt_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[0,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,0]
@@ -38,13 +38,13 @@ define <4 x i32> @ins_elt_1(i32 %x, <4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_1:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $1, %edi, %xmm0
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; AVX-NEXT:    retq
@@ -57,7 +57,7 @@ define <4 x i32> @ins_elt_1(i32 %x, <4 x
 
 define <4 x i32> @ins_elt_2_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_2_commute:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2]
@@ -67,13 +67,13 @@ define <4 x i32> @ins_elt_2_commute(i32
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_2_commute:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $2, %edi, %xmm0
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_2_commute:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
 ; AVX-NEXT:    retq
@@ -84,7 +84,7 @@ define <4 x i32> @ins_elt_2_commute(i32
 
 define <4 x i32> @ins_elt_3_commute(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_3_commute:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[2,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
@@ -94,13 +94,13 @@ define <4 x i32> @ins_elt_3_commute(i32
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_3_commute:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $3, %edi, %xmm0
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_3_commute:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; AVX-NEXT:    retq
@@ -113,7 +113,7 @@ define <4 x i32> @ins_elt_3_commute(i32
 
 define <4 x i32> @ins_elt_0_to_2(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_0_to_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm0
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -121,14 +121,14 @@ define <4 x i32> @ins_elt_0_to_2(i32 %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_0_to_2:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $0, %edi, %xmm0
 ; SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_0_to_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $0, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
@@ -140,21 +140,21 @@ define <4 x i32> @ins_elt_0_to_2(i32 %x,
 
 define <4 x i32> @ins_elt_1_to_0(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_1_to_0:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm0
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_1_to_0:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $1, %edi, %xmm0
 ; SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_1_to_0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
@@ -166,7 +166,7 @@ define <4 x i32> @ins_elt_1_to_0(i32 %x,
 
 define <4 x i32> @ins_elt_2_to_3(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_2_to_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2]
@@ -176,14 +176,14 @@ define <4 x i32> @ins_elt_2_to_3(i32 %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_2_to_3:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $2, %edi, %xmm0
 ; SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_2_to_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
@@ -195,7 +195,7 @@ define <4 x i32> @ins_elt_2_to_3(i32 %x,
 
 define <4 x i32> @ins_elt_3_to_1(i32 %x, <4 x i32> %v1, <4 x i32> %v2) {
 ; SSE2-LABEL: ins_elt_3_to_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd %edi, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[2,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
@@ -204,14 +204,14 @@ define <4 x i32> @ins_elt_3_to_1(i32 %x,
 ; SSE2-NEXT:    retq
 ;
 ; SSE4-LABEL: ins_elt_3_to_1:
-; SSE4:       # BB#0:
+; SSE4:       # %bb.0:
 ; SSE4-NEXT:    pinsrd $3, %edi, %xmm0
 ; SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE4-NEXT:    retq
 ;
 ; AVX-LABEL: ins_elt_3_to_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]

Modified: llvm/trunk/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-of-splat-multiuses.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-of-splat-multiuses.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-of-splat-multiuses.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <2 x double> @foo2(<2 x double> %v, <2 x double> *%p) nounwind {
 ; AVX2-LABEL: foo2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
 ; AVX2-NEXT:    vmovapd %xmm0, (%rdi)
 ; AVX2-NEXT:    retq
@@ -16,7 +16,7 @@ define <2 x double> @foo2(<2 x double> %
 
 define <4 x double> @foo4(<4 x double> %v, <4 x double> *%p) nounwind {
 ; AVX2-LABEL: foo4:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
 ; AVX2-NEXT:    vmovaps %ymm0, (%rdi)
 ; AVX2-NEXT:    retq
@@ -28,7 +28,7 @@ define <4 x double> @foo4(<4 x double> %
 
 define <8 x float> @foo8(<8 x float> %v, <8 x float> *%p) nounwind {
 ; AVX2-LABEL: foo8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
 ; AVX2-NEXT:    vmovaps %ymm0, (%rdi)
@@ -41,7 +41,7 @@ define <8 x float> @foo8(<8 x float> %v,
 
 define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
 ; AVX2-LABEL: undef_splatmask:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    retq
   %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
@@ -51,7 +51,7 @@ define <4 x i32> @undef_splatmask(<4 x i
 
 define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
 ; AVX2-LABEL: undef_splatmask2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    retq
   %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 undef>
@@ -61,7 +61,7 @@ define <4 x i32> @undef_splatmask2(<4 x
 
 define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
 ; AVX2-LABEL: undef_splatmask3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    retq
   %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
@@ -71,7 +71,7 @@ define <4 x i32> @undef_splatmask3(<4 x
 
 define <4 x i32> @undef_splatmask4(<4 x i32> %v, <4 x i32>* %p) nounwind {
 ; AVX2-LABEL: undef_splatmask4:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,2,3,3]
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vmovaps %xmm0, (%rdi)
@@ -85,7 +85,7 @@ define <4 x i32> @undef_splatmask4(<4 x
 
 define <4 x i32> @undef_splatmask5(<4 x i32> %v, <4 x i32>* %p) nounwind {
 ; AVX2-LABEL: undef_splatmask5:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd %xmm0, %xmm1
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)

Modified: llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-128.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define void @shuffle_v16i8_to_v8i8_1(<16 x i8>* %L, <8 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v8i8_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -30,42 +30,42 @@ define void @shuffle_v16i8_to_v8i8_1(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v8i8_1:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movq %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512VL-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v8i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlw $8, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovwb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -77,7 +77,7 @@ define void @shuffle_v16i8_to_v8i8_1(<16
 
 define void @shuffle_v8i16_to_v4i16_1(<8 x i16>* %L, <4 x i16>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v8i16_to_v4i16_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = mem[3,1,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -86,41 +86,41 @@ define void @shuffle_v8i16_to_v4i16_1(<8
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v8i16_to_v4i16_1:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
 ; SSE42-NEXT:    movq %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
 ; AVX-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
 ; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
 ; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v8i16_to_v4i16_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -132,37 +132,37 @@ define void @shuffle_v8i16_to_v4i16_1(<8
 
 define void @shuffle_v4i32_to_v2i32_1(<4 x i32>* %L, <2 x i32>* %S) nounwind {
 ; SSE-LABEL: shuffle_v4i32_to_v2i32_1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[1,3,2,3]
 ; SSE-NEXT:    movq %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[1,3,2,3]
 ; AVX-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpermilps {{.*#+}} xmm0 = mem[1,3,2,3]
 ; AVX512F-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
 ; AVX512VL-NEXT:    vpmovqd %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpermilps {{.*#+}} xmm0 = mem[1,3,2,3]
 ; AVX512BW-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v4i32_to_v2i32_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
 ; AVX512BWVL-NEXT:    vpmovqd %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -174,7 +174,7 @@ define void @shuffle_v4i32_to_v2i32_1(<4
 
 define void @shuffle_v16i8_to_v4i8_1(<16 x i8>* %L, <4 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v4i8_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -190,42 +190,42 @@ define void @shuffle_v16i8_to_v4i8_1(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v4i8_1:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movd %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlw $8, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -237,7 +237,7 @@ define void @shuffle_v16i8_to_v4i8_1(<16
 
 define void @shuffle_v16i8_to_v4i8_2(<16 x i8>* %L, <4 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v4i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
@@ -249,41 +249,41 @@ define void @shuffle_v16i8_to_v4i8_2(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v4i8_2:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movd %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -295,7 +295,7 @@ define void @shuffle_v16i8_to_v4i8_2(<16
 
 define void @shuffle_v16i8_to_v4i8_3(<16 x i8>* %L, <4 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v4i8_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -311,41 +311,41 @@ define void @shuffle_v16i8_to_v4i8_3(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v4i8_3:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movd %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrld $24, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrld $24, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -357,41 +357,41 @@ define void @shuffle_v16i8_to_v4i8_3(<16
 
 define void @shuffle_v8i16_to_v2i16_1(<8 x i16>* %L, <2 x i16>* %S) nounwind {
 ; SSE-LABEL: shuffle_v8i16_to_v2i16_1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
 ; SSE-NEXT:    movd %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512BW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -403,41 +403,41 @@ define void @shuffle_v8i16_to_v2i16_1(<8
 
 define void @shuffle_v8i16_to_v2i16_2(<8 x i16>* %L, <2 x i16>* %S) nounwind {
 ; SSE-LABEL: shuffle_v8i16_to_v2i16_2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
 ; SSE-NEXT:    movd %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
 ; AVX512VL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; AVX512BW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
 ; AVX512BWVL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -449,41 +449,41 @@ define void @shuffle_v8i16_to_v2i16_2(<8
 
 define void @shuffle_v8i16_to_v2i16_3(<8 x i16>* %L, <2 x i16>* %S) nounwind {
 ; SSE-LABEL: shuffle_v8i16_to_v2i16_3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; SSE-NEXT:    movd %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrlq $48, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,1,2,3]
 ; AVX512BW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlq $48, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -495,7 +495,7 @@ define void @shuffle_v8i16_to_v2i16_3(<8
 
 define void @shuffle_v16i8_to_v2i8_1(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -509,42 +509,42 @@ define void @shuffle_v16i8_to_v2i8_1(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_1:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlw $8, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -556,7 +556,7 @@ define void @shuffle_v16i8_to_v2i8_1(<16
 
 define void @shuffle_v16i8_to_v2i8_2(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -567,41 +567,41 @@ define void @shuffle_v16i8_to_v2i8_2(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_2:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrld $16, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -613,7 +613,7 @@ define void @shuffle_v16i8_to_v2i8_2(<16
 
 define void @shuffle_v16i8_to_v2i8_3(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -627,41 +627,41 @@ define void @shuffle_v16i8_to_v2i8_3(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_3:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrld $24, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrld $24, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -673,7 +673,7 @@ define void @shuffle_v16i8_to_v2i8_3(<16
 
 define void @shuffle_v16i8_to_v2i8_4(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_4:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -684,41 +684,41 @@ define void @shuffle_v16i8_to_v2i8_4(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_4:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_4:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,3,3]
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -730,7 +730,7 @@ define void @shuffle_v16i8_to_v2i8_4(<16
 
 define void @shuffle_v16i8_to_v2i8_5(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_5:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -744,41 +744,41 @@ define void @shuffle_v16i8_to_v2i8_5(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_5:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrlq $40, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_5:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlq $40, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -790,7 +790,7 @@ define void @shuffle_v16i8_to_v2i8_5(<16
 
 define void @shuffle_v16i8_to_v2i8_6(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_6:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -801,41 +801,41 @@ define void @shuffle_v16i8_to_v2i8_6(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_6:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrlq $48, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_6:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlq $48, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -847,7 +847,7 @@ define void @shuffle_v16i8_to_v2i8_6(<16
 
 define void @shuffle_v16i8_to_v2i8_7(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8_7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
@@ -861,41 +861,41 @@ define void @shuffle_v16i8_to_v2i8_7(<16
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8_7:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpsrlq $56, (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8_7:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpsrlq $56, (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-256.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @shuffle_v32i8_to_v16i8_1(<32 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v16i8_1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
@@ -20,7 +20,7 @@ define void @shuffle_v32i8_to_v16i8_1(<3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v16i8_1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
@@ -32,7 +32,7 @@ define void @shuffle_v32i8_to_v16i8_1(<3
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuffle_v32i8_to_v16i8_1:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
@@ -50,7 +50,7 @@ define void @shuffle_v32i8_to_v16i8_1(<3
 
 define void @shuffle_v16i16_to_v8i16_1(<16 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v16i16_to_v8i16_1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -62,7 +62,7 @@ define void @shuffle_v16i16_to_v8i16_1(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_to_v8i16_1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -74,7 +74,7 @@ define void @shuffle_v16i16_to_v8i16_1(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuffle_v16i16_to_v8i16_1:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -92,7 +92,7 @@ define void @shuffle_v16i16_to_v8i16_1(<
 
 define void @shuffle_v8i32_to_v4i32_1(<8 x i32>* %L, <4 x i32>* %S) nounwind {
 ; AVX-LABEL: shuffle_v8i32_to_v4i32_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -101,7 +101,7 @@ define void @shuffle_v8i32_to_v4i32_1(<8
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: shuffle_v8i32_to_v4i32_1:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -116,7 +116,7 @@ define void @shuffle_v8i32_to_v4i32_1(<8
 
 define void @shuffle_v32i8_to_v8i8_1(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -128,7 +128,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -140,7 +140,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -152,7 +152,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -164,7 +164,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -176,7 +176,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,1,5,5,9,9,13,13,13,13,5,5,12,12,13,13]
@@ -194,7 +194,7 @@ define void @shuffle_v32i8_to_v8i8_1(<32
 
 define void @shuffle_v32i8_to_v8i8_2(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -206,7 +206,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -218,7 +218,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -230,7 +230,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -242,7 +242,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -254,7 +254,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
@@ -272,7 +272,7 @@ define void @shuffle_v32i8_to_v8i8_2(<32
 
 define void @shuffle_v32i8_to_v8i8_3(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -284,7 +284,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -296,7 +296,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -308,7 +308,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -320,7 +320,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -332,7 +332,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = [3,3,7,7,11,11,15,15,7,7,15,15,6,6,7,7]
@@ -350,7 +350,7 @@ define void @shuffle_v32i8_to_v8i8_3(<32
 
 define void @shuffle_v16i16_to_v4i16_1(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -363,7 +363,7 @@ define void @shuffle_v16i16_to_v4i16_1(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -376,7 +376,7 @@ define void @shuffle_v16i16_to_v4i16_1(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -389,7 +389,7 @@ define void @shuffle_v16i16_to_v4i16_1(<
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -402,7 +402,7 @@ define void @shuffle_v16i16_to_v4i16_1(<
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -415,7 +415,7 @@ define void @shuffle_v16i16_to_v4i16_1(<
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -434,7 +434,7 @@ define void @shuffle_v16i16_to_v4i16_1(<
 
 define void @shuffle_v16i16_to_v4i16_2(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -447,7 +447,7 @@ define void @shuffle_v16i16_to_v4i16_2(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -460,7 +460,7 @@ define void @shuffle_v16i16_to_v4i16_2(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -473,7 +473,7 @@ define void @shuffle_v16i16_to_v4i16_2(<
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -482,7 +482,7 @@ define void @shuffle_v16i16_to_v4i16_2(<
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -495,7 +495,7 @@ define void @shuffle_v16i16_to_v4i16_2(<
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -510,7 +510,7 @@ define void @shuffle_v16i16_to_v4i16_2(<
 
 define void @shuffle_v16i16_to_v4i16_3(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -523,7 +523,7 @@ define void @shuffle_v16i16_to_v4i16_3(<
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -536,7 +536,7 @@ define void @shuffle_v16i16_to_v4i16_3(<
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -549,7 +549,7 @@ define void @shuffle_v16i16_to_v4i16_3(<
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -562,7 +562,7 @@ define void @shuffle_v16i16_to_v4i16_3(<
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -575,7 +575,7 @@ define void @shuffle_v16i16_to_v4i16_3(<
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -594,7 +594,7 @@ define void @shuffle_v16i16_to_v4i16_3(<
 
 define void @shuffle_v32i8_to_v4i8_1(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -606,7 +606,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -618,7 +618,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -630,7 +630,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,1,1,8,8,9,9,8,8,9,9,10,10,11,11]
@@ -644,7 +644,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -656,7 +656,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,1,1,8,8,9,9,8,8,9,9,10,10,11,11]
@@ -676,7 +676,7 @@ define void @shuffle_v32i8_to_v4i8_1(<32
 
 define void @shuffle_v32i8_to_v4i8_2(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -688,7 +688,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -700,7 +700,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -712,7 +712,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -725,7 +725,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -737,7 +737,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -756,7 +756,7 @@ define void @shuffle_v32i8_to_v4i8_2(<32
 
 define void @shuffle_v32i8_to_v4i8_3(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -768,7 +768,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -780,7 +780,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -792,7 +792,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [10,10,11,11,2,2,3,3,8,8,9,9,10,10,11,11]
@@ -806,7 +806,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -818,7 +818,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = [10,10,11,11,2,2,3,3,8,8,9,9,10,10,11,11]
@@ -838,7 +838,7 @@ define void @shuffle_v32i8_to_v4i8_3(<32
 
 define void @shuffle_v32i8_to_v4i8_4(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -850,7 +850,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -862,7 +862,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -874,7 +874,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -883,7 +883,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -895,7 +895,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_4:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -910,7 +910,7 @@ define void @shuffle_v32i8_to_v4i8_4(<32
 
 define void @shuffle_v32i8_to_v4i8_5(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -922,7 +922,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -934,7 +934,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -946,7 +946,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -963,7 +963,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -975,7 +975,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_5:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -998,7 +998,7 @@ define void @shuffle_v32i8_to_v4i8_5(<32
 
 define void @shuffle_v32i8_to_v4i8_6(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1010,7 +1010,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1022,7 +1022,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1034,7 +1034,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -1047,7 +1047,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1059,7 +1059,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_6:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
@@ -1078,7 +1078,7 @@ define void @shuffle_v32i8_to_v4i8_6(<32
 
 define void @shuffle_v32i8_to_v4i8_7(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1090,7 +1090,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1102,7 +1102,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1114,7 +1114,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,14,14,15,15,14,14,15,15,4,4,5,5,6,6]
@@ -1126,7 +1126,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -1138,7 +1138,7 @@ define void @shuffle_v32i8_to_v4i8_7(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8_7:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,7,14,14,15,15,14,14,15,15,4,4,5,5,6,6]

Modified: llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-strided-with-offset-512.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @shuffle_v64i8_to_v32i8_1(<64 x i8>* %L, <32 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -18,7 +18,7 @@ define void @shuffle_v64i8_to_v32i8_1(<6
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -30,7 +30,7 @@ define void @shuffle_v64i8_to_v32i8_1(<6
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -42,7 +42,7 @@ define void @shuffle_v64i8_to_v32i8_1(<6
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
@@ -60,7 +60,7 @@ define void @shuffle_v64i8_to_v32i8_1(<6
 
 define void @shuffle_v32i16_to_v16i16_1(<32 x i16>* %L, <16 x i16>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15,22,23,18,19,20,21,22,23,18,19,22,23,26,27,30,31]
@@ -72,7 +72,7 @@ define void @shuffle_v32i16_to_v16i16_1(
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15,22,23,18,19,20,21,22,23,18,19,22,23,26,27,30,31]
@@ -84,7 +84,7 @@ define void @shuffle_v32i16_to_v16i16_1(
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15,22,23,18,19,20,21,22,23,18,19,22,23,26,27,30,31]
@@ -96,7 +96,7 @@ define void @shuffle_v32i16_to_v16i16_1(
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,3,5,7,17,19,21,23,9,11,13,15,25,27,29,31]
@@ -113,7 +113,7 @@ define void @shuffle_v32i16_to_v16i16_1(
 
 define void @shuffle_v16i32_to_v8i32_1(<16 x i32>* %L, <8 x i32>* %S) nounwind {
 ; AVX512-LABEL: shuffle_v16i32_to_v8i32_1:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %zmm0
 ; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
 ; AVX512-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
@@ -129,7 +129,7 @@ define void @shuffle_v16i32_to_v8i32_1(<
 
 define void @shuffle_v64i8_to_v16i8_1(<64 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -148,7 +148,7 @@ define void @shuffle_v64i8_to_v16i8_1(<6
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -167,7 +167,7 @@ define void @shuffle_v64i8_to_v16i8_1(<6
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -186,7 +186,7 @@ define void @shuffle_v64i8_to_v16i8_1(<6
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -211,7 +211,7 @@ define void @shuffle_v64i8_to_v16i8_1(<6
 
 define void @shuffle_v64i8_to_v16i8_2(<64 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -230,7 +230,7 @@ define void @shuffle_v64i8_to_v16i8_2(<6
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -249,7 +249,7 @@ define void @shuffle_v64i8_to_v16i8_2(<6
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -268,7 +268,7 @@ define void @shuffle_v64i8_to_v16i8_2(<6
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -293,7 +293,7 @@ define void @shuffle_v64i8_to_v16i8_2(<6
 
 define void @shuffle_v64i8_to_v16i8_3(<64 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -312,7 +312,7 @@ define void @shuffle_v64i8_to_v16i8_3(<6
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -331,7 +331,7 @@ define void @shuffle_v64i8_to_v16i8_3(<6
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -350,7 +350,7 @@ define void @shuffle_v64i8_to_v16i8_3(<6
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -375,7 +375,7 @@ define void @shuffle_v64i8_to_v16i8_3(<6
 
 define void @shuffle_v32i16_to_v8i16_1(<32 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -396,7 +396,7 @@ define void @shuffle_v32i16_to_v8i16_1(<
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -417,7 +417,7 @@ define void @shuffle_v32i16_to_v8i16_1(<
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -438,7 +438,7 @@ define void @shuffle_v32i16_to_v8i16_1(<
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <1,5,9,13,17,21,25,29,u,u,u,u,u,u,u,u>
@@ -454,7 +454,7 @@ define void @shuffle_v32i16_to_v8i16_1(<
 
 define void @shuffle_v32i16_to_v8i16_2(<32 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -475,7 +475,7 @@ define void @shuffle_v32i16_to_v8i16_2(<
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -496,7 +496,7 @@ define void @shuffle_v32i16_to_v8i16_2(<
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -517,7 +517,7 @@ define void @shuffle_v32i16_to_v8i16_2(<
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,6,10,14,18,22,26,30,u,u,u,u,u,u,u,u>
@@ -533,7 +533,7 @@ define void @shuffle_v32i16_to_v8i16_2(<
 
 define void @shuffle_v32i16_to_v8i16_3(<32 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -554,7 +554,7 @@ define void @shuffle_v32i16_to_v8i16_3(<
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -575,7 +575,7 @@ define void @shuffle_v32i16_to_v8i16_3(<
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -596,7 +596,7 @@ define void @shuffle_v32i16_to_v8i16_3(<
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <3,7,11,15,19,23,27,31,u,u,u,u,u,u,u,u>
@@ -612,7 +612,7 @@ define void @shuffle_v32i16_to_v8i16_3(<
 
 define void @shuffle_v64i8_to_v8i8_1(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -631,7 +631,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -650,7 +650,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -669,7 +669,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_1:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -697,7 +697,7 @@ define void @shuffle_v64i8_to_v8i8_1(<64
 
 define void @shuffle_v64i8_to_v8i8_2(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -716,7 +716,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -735,7 +735,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -754,7 +754,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_2:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <1,5,9,13,17,21,25,29,u,u,u,u,u,u,u,u>
@@ -770,7 +770,7 @@ define void @shuffle_v64i8_to_v8i8_2(<64
 
 define void @shuffle_v64i8_to_v8i8_3(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -789,7 +789,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -808,7 +808,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -827,7 +827,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_3:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -855,7 +855,7 @@ define void @shuffle_v64i8_to_v8i8_3(<64
 
 define void @shuffle_v64i8_to_v8i8_4(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -874,7 +874,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -893,7 +893,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -912,7 +912,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_4:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,6,10,14,18,22,26,30,u,u,u,u,u,u,u,u>
@@ -928,7 +928,7 @@ define void @shuffle_v64i8_to_v8i8_4(<64
 
 define void @shuffle_v64i8_to_v8i8_5(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -947,7 +947,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -966,7 +966,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -985,7 +985,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_5:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1020,7 +1020,7 @@ define void @shuffle_v64i8_to_v8i8_5(<64
 
 define void @shuffle_v64i8_to_v8i8_6(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1039,7 +1039,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1058,7 +1058,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1077,7 +1077,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_6:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <3,7,11,15,19,23,27,31,u,u,u,u,u,u,u,u>
@@ -1093,7 +1093,7 @@ define void @shuffle_v64i8_to_v8i8_6(<64
 
 define void @shuffle_v64i8_to_v8i8_7(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1112,7 +1112,7 @@ define void @shuffle_v64i8_to_v8i8_7(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1131,7 +1131,7 @@ define void @shuffle_v64i8_to_v8i8_7(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -1150,7 +1150,7 @@ define void @shuffle_v64i8_to_v8i8_7(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8_7:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2

Modified: llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-128.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@
 
 define void @shuffle_v16i8_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v8i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -22,42 +22,42 @@ define void @shuffle_v16i8_to_v8i8(<16 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v8i8:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movq %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v8i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512VL-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v8i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovwb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -69,7 +69,7 @@ define void @shuffle_v16i8_to_v8i8(<16 x
 
 define void @trunc_v8i16_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
 ; SSE2-LABEL: trunc_v8i16_to_v8i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -77,42 +77,42 @@ define void @trunc_v8i16_to_v8i8(<16 x i
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: trunc_v8i16_to_v8i8:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movq %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: trunc_v8i16_to_v8i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v8i16_to_v8i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v8i16_to_v8i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512VL-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v8i16_to_v8i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v8i16_to_v8i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovwb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -125,7 +125,7 @@ define void @trunc_v8i16_to_v8i8(<16 x i
 
 define void @shuffle_v8i16_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v8i16_to_v4i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -133,41 +133,41 @@ define void @shuffle_v8i16_to_v4i16(<8 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v8i16_to_v4i16:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSE42-NEXT:    movq %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v8i16_to_v4i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v8i16_to_v4i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -179,7 +179,7 @@ define void @shuffle_v8i16_to_v4i16(<8 x
 
 define void @trunc_v4i32_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
 ; SSE2-LABEL: trunc_v4i32_to_v4i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -187,41 +187,41 @@ define void @trunc_v4i32_to_v4i16(<8 x i
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: trunc_v4i32_to_v4i16:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSE42-NEXT:    movq %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: trunc_v4i32_to_v4i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v4i32_to_v4i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v4i32_to_v4i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v4i32_to_v4i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v4i32_to_v4i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -234,37 +234,37 @@ define void @trunc_v4i32_to_v4i16(<8 x i
 
 define void @shuffle_v4i32_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
 ; SSE-LABEL: shuffle_v4i32_to_v2i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; SSE-NEXT:    movq %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4i32_to_v2i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512F-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqd %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512BW-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v4i32_to_v2i32:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqd %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -276,37 +276,37 @@ define void @shuffle_v4i32_to_v2i32(<4 x
 
 define void @trunc_v2i64_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
 ; SSE-LABEL: trunc_v2i64_to_v2i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; SSE-NEXT:    movq %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: trunc_v2i64_to_v2i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v2i64_to_v2i32:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512F-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v2i64_to_v2i32:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqd %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v2i64_to_v2i32:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512BW-NEXT:    vmovlps %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v2i64_to_v2i32:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqd %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -319,7 +319,7 @@ define void @trunc_v2i64_to_v2i32(<4 x i
 
 define void @shuffle_v16i8_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v4i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -328,41 +328,41 @@ define void @shuffle_v16i8_to_v4i8(<16 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v4i8:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movd %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v4i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -374,7 +374,7 @@ define void @shuffle_v16i8_to_v4i8(<16 x
 
 define void @trunc_v4i32_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
 ; SSE2-LABEL: trunc_v4i32_to_v4i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -383,41 +383,41 @@ define void @trunc_v4i32_to_v4i8(<16 x i
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: trunc_v4i32_to_v4i8:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    movd %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: trunc_v4i32_to_v4i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v4i32_to_v4i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v4i32_to_v4i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v4i32_to_v4i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v4i32_to_v4i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovdb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -430,41 +430,41 @@ define void @trunc_v4i32_to_v4i8(<16 x i
 
 define void @shuffle_v8i16_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
 ; SSE-LABEL: shuffle_v8i16_to_v2i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    movd %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v8i16_to_v2i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512BW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -476,41 +476,41 @@ define void @shuffle_v8i16_to_v2i16(<8 x
 
 define void @trunc_v2i64_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
 ; SSE-LABEL: trunc_v2i64_to_v2i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    movd %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: trunc_v2i64_to_v2i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v2i64_to_v2i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v2i64_to_v2i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v2i64_to_v2i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
 ; AVX512BW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v2i64_to_v2i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -523,7 +523,7 @@ define void @trunc_v2i64_to_v2i16(<8 x i
 
 define void @shuffle_v16i8_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: shuffle_v16i8_to_v2i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -534,41 +534,41 @@ define void @shuffle_v16i8_to_v2i8(<16 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: shuffle_v16i8_to_v2i8:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v16i8_to_v2i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq
@@ -580,7 +580,7 @@ define void @shuffle_v16i8_to_v2i8(<16 x
 
 define void @trunc_v2i64_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
 ; SSE2-LABEL: trunc_v2i64_to_v2i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -591,41 +591,41 @@ define void @trunc_v2i64_to_v2i8(<16 x i
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: trunc_v2i64_to_v2i8:
-; SSE42:       # BB#0:
+; SSE42:       # %bb.0:
 ; SSE42-NEXT:    movdqa (%rdi), %xmm0
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: trunc_v2i64_to_v2i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v2i64_to_v2i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v2i64_to_v2i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v2i64_to_v2i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v2i64_to_v2i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-256.ll Mon Dec  4 09:18:51 2017
@@ -12,7 +12,7 @@
 
 define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -24,7 +24,7 @@ define void @shuffle_v32i8_to_v16i8(<32
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -36,7 +36,7 @@ define void @shuffle_v32i8_to_v16i8(<32
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: shuffle_v32i8_to_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -54,7 +54,7 @@ define void @shuffle_v32i8_to_v16i8(<32
 
 define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX1-LABEL: trunc_v16i16_to_v16i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -66,7 +66,7 @@ define void @trunc_v16i16_to_v16i8(<32 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_v16i16_to_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -78,7 +78,7 @@ define void @trunc_v16i16_to_v16i8(<32 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v16i16_to_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwd (%rdi), %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -86,7 +86,7 @@ define void @trunc_v16i16_to_v16i8(<32 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v16i16_to_v16i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpmovsxwd (%rdi), %zmm0
 ; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512VL-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -94,7 +94,7 @@ define void @trunc_v16i16_to_v16i8(<32 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v16i16_to_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -102,7 +102,7 @@ define void @trunc_v16i16_to_v16i8(<32 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v16i16_to_v16i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpmovwb %ymm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -116,7 +116,7 @@ define void @trunc_v16i16_to_v16i8(<32 x
 
 define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v16i16_to_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -128,7 +128,7 @@ define void @shuffle_v16i16_to_v8i16(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_to_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -140,7 +140,7 @@ define void @shuffle_v16i16_to_v8i16(<16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -152,7 +152,7 @@ define void @shuffle_v16i16_to_v8i16(<16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -167,7 +167,7 @@ define void @shuffle_v16i16_to_v8i16(<16
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -179,7 +179,7 @@ define void @shuffle_v16i16_to_v8i16(<16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v8i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -200,7 +200,7 @@ define void @shuffle_v16i16_to_v8i16(<16
 
 define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX1-LABEL: trunc_v8i32_to_v8i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -212,7 +212,7 @@ define void @trunc_v8i32_to_v8i16(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_v8i32_to_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -221,7 +221,7 @@ define void @trunc_v8i32_to_v8i16(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v8i32_to_v8i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -229,14 +229,14 @@ define void @trunc_v8i32_to_v8i16(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v8i32_to_v8i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vpmovdw %ymm0, (%rsi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v8i32_to_v8i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -244,7 +244,7 @@ define void @trunc_v8i32_to_v8i16(<16 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpmovdw %ymm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -258,7 +258,7 @@ define void @trunc_v8i32_to_v8i16(<16 x
 
 define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
 ; AVX-LABEL: shuffle_v8i32_to_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -267,7 +267,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: shuffle_v8i32_to_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -282,7 +282,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x
 
 define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
 ; AVX1-LABEL: trunc_v4i64_to_v4i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -291,7 +291,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_v4i64_to_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vmovaps %xmm0, (%rsi)
@@ -299,7 +299,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v4i64_to_v4i32:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -307,14 +307,14 @@ define void @trunc_v4i64_to_v4i32(<8 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i32:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vpmovqd %ymm0, (%rsi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v4i64_to_v4i32:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqa %xmm0, (%rsi)
@@ -322,7 +322,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i32:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpmovqd %ymm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -336,7 +336,7 @@ define void @trunc_v4i64_to_v4i32(<8 x i
 
 define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v8i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -348,7 +348,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v8i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -360,7 +360,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -372,7 +372,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -384,7 +384,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -396,7 +396,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -417,7 +417,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x
 
 define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX1-LABEL: trunc_v8i32_to_v8i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -429,7 +429,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_v8i32_to_v8i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
@@ -439,7 +439,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v8i32_to_v8i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -448,14 +448,14 @@ define void @trunc_v8i32_to_v8i8(<32 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vpmovdb %ymm0, (%rsi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v8i32_to_v8i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -464,7 +464,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpmovdb %ymm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -478,7 +478,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i
 
 define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v16i16_to_v4i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -491,7 +491,7 @@ define void @shuffle_v16i16_to_v4i16(<16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_to_v4i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -504,7 +504,7 @@ define void @shuffle_v16i16_to_v4i16(<16
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -517,7 +517,7 @@ define void @shuffle_v16i16_to_v4i16(<16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -526,7 +526,7 @@ define void @shuffle_v16i16_to_v4i16(<16
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -539,7 +539,7 @@ define void @shuffle_v16i16_to_v4i16(<16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -554,7 +554,7 @@ define void @shuffle_v16i16_to_v4i16(<16
 
 define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX1-LABEL: trunc_v4i64_to_v4i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -564,7 +564,7 @@ define void @trunc_v4i64_to_v4i16(<16 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_v4i64_to_v4i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -573,7 +573,7 @@ define void @trunc_v4i64_to_v4i16(<16 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v4i64_to_v4i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -582,14 +582,14 @@ define void @trunc_v4i64_to_v4i16(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vpmovqw %ymm0, (%rsi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v4i64_to_v4i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -598,7 +598,7 @@ define void @trunc_v4i64_to_v4i16(<16 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpmovqw %ymm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -612,7 +612,7 @@ define void @trunc_v4i64_to_v4i16(<16 x
 
 define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: shuffle_v32i8_to_v4i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -624,7 +624,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_to_v4i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -636,7 +636,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -648,7 +648,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512VL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -657,7 +657,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -669,7 +669,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -684,7 +684,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x
 
 define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX1-LABEL: trunc_v4i64_to_v4i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -694,7 +694,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_v4i64_to_v4i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -703,7 +703,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v4i64_to_v4i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -712,14 +712,14 @@ define void @trunc_v4i64_to_v4i8(<32 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vpmovqb %ymm0, (%rsi)
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v4i64_to_v4i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -728,7 +728,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BWVL-NEXT:    vpmovqb %ymm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -744,7 +744,7 @@ define void @trunc_v4i64_to_v4i8(<32 x i
 ; the resulting BUILD_VECTOR should not be combined to a truncate.
 define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
 ; AVX1-LABEL: negative:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u],zero,zero,zero,zero,zero,zero,zero,xmm0[0,2,4,6,8,10,12,14]
@@ -755,7 +755,7 @@ define <16 x i8> @negative(<32 x i8> %v,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: negative:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -765,7 +765,7 @@ define <16 x i8> @negative(<32 x i8> %v,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: negative:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -775,7 +775,7 @@ define <16 x i8> @negative(<32 x i8> %v,
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: negative:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -785,7 +785,7 @@ define <16 x i8> @negative(<32 x i8> %v,
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: negative:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512BW-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -795,7 +795,7 @@ define <16 x i8> @negative(<32 x i8> %v,
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: negative:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
 ; AVX512BWVL-NEXT:    movl $65537, %eax # imm = 0x10001
 ; AVX512BWVL-NEXT:    kmovd %eax, %k1

Modified: llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-512.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -22,7 +22,7 @@ define void @shuffle_v64i8_to_v32i8(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -34,7 +34,7 @@ define void @shuffle_v64i8_to_v32i8(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -46,7 +46,7 @@ define void @shuffle_v64i8_to_v32i8(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
@@ -64,7 +64,7 @@ define void @shuffle_v64i8_to_v32i8(<64
 
 define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
 ; AVX512F-LABEL: trunc_v32i16_to_v32i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovsxwd (%rdi), %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxwd 32(%rdi), %zmm1
@@ -75,7 +75,7 @@ define void @trunc_v32i16_to_v32i8(<64 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_v32i16_to_v32i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpmovsxwd (%rdi), %zmm0
 ; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512VL-NEXT:    vpmovsxwd 32(%rdi), %zmm1
@@ -86,14 +86,14 @@ define void @trunc_v32i16_to_v32i8(<64 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_v32i16_to_v32i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vpmovwb %zmm0, (%rsi)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_v32i16_to_v32i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vpmovwb %zmm0, (%rsi)
 ; AVX512BWVL-NEXT:    vzeroupper
@@ -107,7 +107,7 @@ define void @trunc_v32i16_to_v32i8(<64 x
 
 define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
 ; AVX512F-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
 ; AVX512F-NEXT:    vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -119,7 +119,7 @@ define void @shuffle_v32i16_to_v16i16(<3
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
 ; AVX512VL-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
 ; AVX512VL-NEXT:    vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -131,7 +131,7 @@ define void @shuffle_v32i16_to_v16i16(<3
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
@@ -145,7 +145,7 @@ define void @shuffle_v32i16_to_v16i16(<3
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,4,6,16,18,20,22,8,10,12,14,24,26,28,30]
@@ -162,7 +162,7 @@ define void @shuffle_v32i16_to_v16i16(<3
 
 define void @trunc_v16i32_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
 ; AVX512-LABEL: trunc_v16i32_to_v16i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa32 (%rdi), %zmm0
 ; AVX512-NEXT:    vpmovdw %zmm0, (%rsi)
 ; AVX512-NEXT:    vzeroupper
@@ -176,7 +176,7 @@ define void @trunc_v16i32_to_v16i16(<32
 
 define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
 ; AVX512-LABEL: shuffle_v16i32_to_v8i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %zmm0
 ; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
 ; AVX512-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
@@ -192,7 +192,7 @@ define void @shuffle_v16i32_to_v8i32(<16
 
 define void @trunc_v8i64_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
 ; AVX512-LABEL: trunc_v8i64_to_v8i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512-NEXT:    vpmovqd %zmm0, (%rsi)
 ; AVX512-NEXT:    vzeroupper
@@ -206,7 +206,7 @@ define void @trunc_v8i64_to_v8i32(<16 x
 
 define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -225,7 +225,7 @@ define void @shuffle_v64i8_to_v16i8(<64
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -244,7 +244,7 @@ define void @shuffle_v64i8_to_v16i8(<64
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -263,7 +263,7 @@ define void @shuffle_v64i8_to_v16i8(<64
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -288,7 +288,7 @@ define void @shuffle_v64i8_to_v16i8(<64
 
 define void @trunc_v16i32_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512-LABEL: trunc_v16i32_to_v16i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa32 (%rdi), %zmm0
 ; AVX512-NEXT:    vpmovdb %zmm0, (%rsi)
 ; AVX512-NEXT:    vzeroupper
@@ -302,7 +302,7 @@ define void @trunc_v16i32_to_v16i8(<64 x
 
 define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -323,7 +323,7 @@ define void @shuffle_v32i16_to_v8i16(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -344,7 +344,7 @@ define void @shuffle_v32i16_to_v8i16(<32
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -365,7 +365,7 @@ define void @shuffle_v32i16_to_v8i16(<32
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,u,u,u,u,u,u,u,u>
@@ -381,7 +381,7 @@ define void @shuffle_v32i16_to_v8i16(<32
 
 define void @trunc_v8i64_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512-LABEL: trunc_v8i64_to_v8i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512-NEXT:    vpmovqw %zmm0, (%rsi)
 ; AVX512-NEXT:    vzeroupper
@@ -395,7 +395,7 @@ define void @trunc_v8i64_to_v8i16(<32 x
 
 define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512F-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -414,7 +414,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -433,7 +433,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -452,7 +452,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,u,u,u,u,u,u,u,u>
@@ -468,7 +468,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x
 
 define void @trunc_v8i64_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512-LABEL: trunc_v8i64_to_v8i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
 ; AVX512-NEXT:    vzeroupper
@@ -482,7 +482,7 @@ define void @trunc_v8i64_to_v8i8(<64 x i
 
 define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61(<64 x i8> %x) {
 ; AVX512F-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -498,7 +498,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX512VL-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -514,7 +514,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
@@ -531,7 +531,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
@@ -552,7 +552,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 
 define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62(<64 x i8> %x) {
 ; AVX512F-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -567,7 +567,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512VL-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
@@ -582,7 +582,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -598,7 +598,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BWVL-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -618,7 +618,7 @@ define <16 x i8> @trunc_shuffle_v64i8_01
 
 define <4 x double> @PR34175(<32 x i16>* %p) {
 ; AVX512F-LABEL: PR34175:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -632,7 +632,7 @@ define <4 x double> @PR34175(<32 x i16>*
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: PR34175:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqu (%rdi), %ymm0
 ; AVX512VL-NEXT:    vmovdqu 32(%rdi), %ymm1
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -646,7 +646,7 @@ define <4 x double> @PR34175(<32 x i16>*
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: PR34175:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -660,7 +660,7 @@ define <4 x double> @PR34175(<32 x i16>*
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: PR34175:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,8,16,24,u,u,u,u,u,u,u,u,u,u,u,u>

Modified: llvm/trunk/test/CodeGen/X86/sincos.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sincos.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sincos.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sincos.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@ declare x86_fp80 @sinl(x86_fp80) readonl
 
 define float @test1(float %X) {
 ; CHECK-LABEL: test1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
@@ -25,7 +25,7 @@ define float @test1(float %X) {
 
 define double @test2(double %X) {
 ; CHECK-LABEL: test2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
@@ -39,7 +39,7 @@ define double @test2(double %X) {
 
 define x86_fp80 @test3(x86_fp80 %X) {
 ; CHECK-LABEL: test3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subl $28, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
@@ -60,7 +60,7 @@ declare x86_fp80 @cosl(x86_fp80) readonl
 
 define float @test4(float %X) {
 ; CHECK-LABEL: test4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
@@ -74,7 +74,7 @@ define float @test4(float %X) {
 
 define double @test5(double %X) {
 ; CHECK-LABEL: test5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
@@ -88,7 +88,7 @@ define double @test5(double %X) {
 
 define x86_fp80 @test6(x86_fp80 %X) {
 ; CHECK-LABEL: test6:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subl $28, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)

Modified: llvm/trunk/test/CodeGen/X86/sink-blockfreq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sink-blockfreq.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sink-blockfreq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sink-blockfreq.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 define i32 @sink_freqinfo(i32 %a, i32 %b) nounwind uwtable ssp {
 ; MSINK_BFI-LABEL: sink_freqinfo
 ; MSINK_BFI: jl
-; MSINK_BFI-NEXT: ## BB#
+; MSINK_BFI-NEXT: ## %bb.
 ; MSINK_BFI-NEXT: imull
 
 ; MSINK_NOBFI-LABEL: sink_freqinfo

Modified: llvm/trunk/test/CodeGen/X86/sink-out-of-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sink-out-of-loop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sink-out-of-loop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sink-out-of-loop.ll Mon Dec  4 09:18:51 2017
@@ -68,7 +68,7 @@ loop:
   br i1 %exit_cond, label %exit, label %loop
 
 exit:
-; CHECK: BB#2
+; CHECK: %bb.2
 ; CHECK: imull %eax, %eax
 ; CHECK: retq
   ret i32 %j

Modified: llvm/trunk/test/CodeGen/X86/slow-incdec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/slow-incdec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/slow-incdec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/slow-incdec.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define i32 @inc(i32 %x) {
 ; INCDEC-LABEL: inc:
-; INCDEC:       # BB#0:
+; INCDEC:       # %bb.0:
 ; INCDEC-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; INCDEC-NEXT:    incl %eax
 ; INCDEC-NEXT:    retl
 ;
 ; ADD-LABEL: inc:
-; ADD:       # BB#0:
+; ADD:       # %bb.0:
 ; ADD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; ADD-NEXT:    addl $1, %eax
 ; ADD-NEXT:    retl
@@ -20,13 +20,13 @@ define i32 @inc(i32 %x) {
 
 define i32 @dec(i32 %x) {
 ; INCDEC-LABEL: dec:
-; INCDEC:       # BB#0:
+; INCDEC:       # %bb.0:
 ; INCDEC-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; INCDEC-NEXT:    decl %eax
 ; INCDEC-NEXT:    retl
 ;
 ; ADD-LABEL: dec:
-; ADD:       # BB#0:
+; ADD:       # %bb.0:
 ; ADD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; ADD-NEXT:    addl $-1, %eax
 ; ADD-NEXT:    retl
@@ -36,7 +36,7 @@ define i32 @dec(i32 %x) {
 
 define i32 @inc_size(i32 %x) optsize {
 ; CHECK-LABEL: inc_size:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    incl %eax
 ; CHECK-NEXT:    retl
@@ -46,7 +46,7 @@ define i32 @inc_size(i32 %x) optsize {
 
 define i32 @dec_size(i32 %x) optsize {
 ; CHECK-LABEL: dec_size:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    decl %eax
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/slow-pmulld.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/slow-pmulld.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/slow-pmulld.ll (original)
+++ llvm/trunk/test/CodeGen/X86/slow-pmulld.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define <4 x i32> @foo(<4 x i8> %A) {
 ; CHECK32-LABEL: foo:
-; CHECK32:       # BB#0:
+; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
 ; CHECK32-NEXT:    movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
 ; CHECK32-NEXT:    movdqa %xmm0, %xmm2
@@ -19,7 +19,7 @@ define <4 x i32> @foo(<4 x i8> %A) {
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: foo:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
 ; CHECK64-NEXT:    movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
 ; CHECK64-NEXT:    movdqa %xmm0, %xmm2
@@ -29,13 +29,13 @@ define <4 x i32> @foo(<4 x i8> %A) {
 ; CHECK64-NEXT:    retq
 ;
 ; SSE4-32-LABEL: foo:
-; SSE4-32:       # BB#0:
+; SSE4-32:       # %bb.0:
 ; SSE4-32-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT:    retl
 ;
 ; SSE4-64-LABEL: foo:
-; SSE4-64:       # BB#0:
+; SSE4-64:       # %bb.0:
 ; SSE4-64-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT:    retq
@@ -46,25 +46,25 @@ define <4 x i32> @foo(<4 x i8> %A) {
 
 define <4 x i32> @foo_os(<4 x i8> %A) minsize {
 ; CHECK32-LABEL: foo_os:
-; CHECK32:       # BB#0:
+; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT:    retl
 ;
 ; CHECK64-LABEL: foo_os:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    pand {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT:    retq
 ;
 ; SSE4-32-LABEL: foo_os:
-; SSE4-32:       # BB#0:
+; SSE4-32:       # %bb.0:
 ; SSE4-32-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT:    retl
 ;
 ; SSE4-64-LABEL: foo_os:
-; SSE4-64:       # BB#0:
+; SSE4-64:       # %bb.0:
 ; SSE4-64-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT:    pmulld {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/slow-unaligned-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/slow-unaligned-mem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/slow-unaligned-mem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/slow-unaligned-mem.ll Mon Dec  4 09:18:51 2017
@@ -64,7 +64,7 @@
 define void @store_zeros(i8* %a) {
 ; SLOW-NOT: not a recognized processor
 ; SLOW-LABEL: store_zeros:
-; SLOW:       # BB#0:
+; SLOW:       # %bb.0:
 ; SLOW-NEXT:    movl
 ; SLOW-NEXT:    movl
 ; SLOW-NEXT:    movl
@@ -85,7 +85,7 @@ define void @store_zeros(i8* %a) {
 ;
 ; FAST-NOT: not a recognized processor
 ; FAST-LABEL: store_zeros:
-; FAST:       # BB#0:
+; FAST:       # %bb.0:
 ; FAST-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; FAST-NOT:     movl
   call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i32 1, i1 false)

Modified: llvm/trunk/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll Mon Dec  4 09:18:51 2017
@@ -17,7 +17,7 @@ define fp128 @TestSelect(fp128 %a, fp128
 ; CHECK-NEXT   callq __subtf3
 ; CHECK-NEXT   testl %ebx, %ebx
 ; CHECK-NEXT   jg .LBB0_2
-; CHECK-NEXT # BB#1:
+; CHECK-NEXT # %bb.1:
 ; CHECK-NEXT   movaps .LCPI0_0(%rip), %xmm0
 ; CHECK-NEXT .LBB0_2:
 ; CHECK-NEXT   addq $32, %rsp

Modified: llvm/trunk/test/CodeGen/X86/splat-for-size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/splat-for-size.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/splat-for-size.ll (original)
+++ llvm/trunk/test/CodeGen/X86/splat-for-size.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 ; There is no AVX broadcast from double to 128-bit vector because movddup has been around since SSE3 (grrr).
 define <2 x double> @splat_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: splat_v2f64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
 ; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -18,7 +18,7 @@ define <2 x double> @splat_v2f64(<2 x do
 
 define <4 x double> @splat_v4f64(<4 x double> %x) #1 {
 ; CHECK-LABEL: splat_v4f64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -28,7 +28,7 @@ define <4 x double> @splat_v4f64(<4 x do
 
 define <4 x float> @splat_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: splat_v4f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -38,7 +38,7 @@ define <4 x float> @splat_v4f32(<4 x flo
 
 define <8 x float> @splat_v8f32(<8 x float> %x) #1 {
 ; CHECK-LABEL: splat_v8f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastss {{.*}}(%rip), %ymm1
 ; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -50,13 +50,13 @@ define <8 x float> @splat_v8f32(<8 x flo
 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
 define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
 ; AVX-LABEL: splat_v2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
 ; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v2i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %xmm1
 ; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -68,7 +68,7 @@ define <2 x i64> @splat_v2i64(<2 x i64>
 ; and then we fake it: use vmovddup to splat 64-bit value.
 define <4 x i64> @splat_v4i64(<4 x i64> %x) #0 {
 ; AVX-LABEL: splat_v4i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
 ; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
@@ -77,7 +77,7 @@ define <4 x i64> @splat_v4i64(<4 x i64>
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v4i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -88,13 +88,13 @@ define <4 x i64> @splat_v4i64(<4 x i64>
 ; AVX can't do integer splats, so fake it: use vbroadcastss to splat 32-bit value.
 define <4 x i32> @splat_v4i32(<4 x i32> %x) #1 {
 ; AVX-LABEL: splat_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v4i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
 ; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -105,7 +105,7 @@ define <4 x i32> @splat_v4i32(<4 x i32>
 ; AVX can't do integer splats, so fake it: use vbroadcastss to splat 32-bit value.
 define <8 x i32> @splat_v8i32(<8 x i32> %x) #0 {
 ; AVX-LABEL: splat_v8i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
@@ -114,7 +114,7 @@ define <8 x i32> @splat_v8i32(<8 x i32>
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v8i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -125,12 +125,12 @@ define <8 x i32> @splat_v8i32(<8 x i32>
 ; AVX can't do integer splats, and there's no broadcast fakery for 16-bit. Could use pshuflw, etc?
 define <8 x i16> @splat_v8i16(<8 x i16> %x) #1 {
 ; AVX-LABEL: splat_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v8i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastw {{.*}}(%rip), %xmm1
 ; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -141,7 +141,7 @@ define <8 x i16> @splat_v8i16(<8 x i16>
 ; AVX can't do integer splats, and there's no broadcast fakery for 16-bit. Could use pshuflw, etc?
 define <16 x i16> @splat_v16i16(<16 x i16> %x) #0 {
 ; AVX-LABEL: splat_v16i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2]
 ; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
@@ -150,7 +150,7 @@ define <16 x i16> @splat_v16i16(<16 x i1
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastw {{.*}}(%rip), %ymm1
 ; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -161,12 +161,12 @@ define <16 x i16> @splat_v16i16(<16 x i1
 ; AVX can't do integer splats, and there's no broadcast fakery for 8-bit. Could use pshufb, etc?
 define <16 x i8> @splat_v16i8(<16 x i8> %x) #1 {
 ; AVX-LABEL: splat_v16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v16i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastb {{.*}}(%rip), %xmm1
 ; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
@@ -177,7 +177,7 @@ define <16 x i8> @splat_v16i8(<16 x i8>
 ; AVX can't do integer splats, and there's no broadcast fakery for 8-bit. Could use pshufb, etc?
 define <32 x i8> @splat_v32i8(<32 x i8> %x) #0 {
 ; AVX-LABEL: splat_v32i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
 ; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
@@ -186,7 +186,7 @@ define <32 x i8> @splat_v32i8(<32 x i8>
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_v32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastb {{.*}}(%rip), %ymm1
 ; AVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/split-extend-vector-inreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/split-extend-vector-inreg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/split-extend-vector-inreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/split-extend-vector-inreg.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x i64> @autogen_SD88863() {
 ; X32-LABEL: autogen_SD88863:
-; X32:       # BB#0: # %BB
+; X32:       # %bb.0: # %BB
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
@@ -15,11 +15,11 @@ define <4 x i64> @autogen_SD88863() {
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    testb %al, %al
 ; X32-NEXT:    jne .LBB0_1
-; X32-NEXT:  # BB#2: # %CF240
+; X32-NEXT:  # %bb.2: # %CF240
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: autogen_SD88863:
-; X64:       # BB#0: # %BB
+; X64:       # %bb.0: # %BB
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
@@ -30,7 +30,7 @@ define <4 x i64> @autogen_SD88863() {
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    testb %al, %al
 ; X64-NEXT:    jne .LBB0_1
-; X64-NEXT:  # BB#2: # %CF240
+; X64-NEXT:  # %bb.2: # %CF240
 ; X64-NEXT:    retq
 BB:
   %I26 = insertelement <4 x i64> undef, i64 undef, i32 2

Modified: llvm/trunk/test/CodeGen/X86/split-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/split-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/split-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/split-store.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: int32_float_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, (%rsi)
 ; CHECK-NEXT:    movss %xmm0, 4(%rsi)
 ; CHECK-NEXT:    retq
@@ -18,7 +18,7 @@ define void @int32_float_pair(i32 %tmp1,
 
 define void @float_int32_pair(float %tmp1, i32 %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: float_int32_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss %xmm0, (%rsi)
 ; CHECK-NEXT:    movl %edi, 4(%rsi)
 ; CHECK-NEXT:    retq
@@ -33,7 +33,7 @@ define void @float_int32_pair(float %tmp
 
 define void @int16_float_pair(i16 signext %tmp1, float %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: int16_float_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzwl %di, %eax
 ; CHECK-NEXT:    movl %eax, (%rsi)
 ; CHECK-NEXT:    movss %xmm0, 4(%rsi)
@@ -49,7 +49,7 @@ define void @int16_float_pair(i16 signex
 
 define void @int8_float_pair(i8 signext %tmp1, float %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: int8_float_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    movl %eax, (%rsi)
 ; CHECK-NEXT:    movss %xmm0, 4(%rsi)
@@ -65,7 +65,7 @@ define void @int8_float_pair(i8 signext
 
 define void @int32_int32_pair(i32 %tmp1, i32 %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: int32_int32_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, (%rdx)
 ; CHECK-NEXT:    movl %esi, 4(%rdx)
 ; CHECK-NEXT:    retq
@@ -79,7 +79,7 @@ define void @int32_int32_pair(i32 %tmp1,
 
 define void @int16_int16_pair(i16 signext %tmp1, i16 signext %tmp2, i32* %ref.tmp) {
 ; CHECK-LABEL: int16_int16_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movw %di, (%rdx)
 ; CHECK-NEXT:    movw %si, 2(%rdx)
 ; CHECK-NEXT:    retq
@@ -93,7 +93,7 @@ define void @int16_int16_pair(i16 signex
 
 define void @int8_int8_pair(i8 signext %tmp1, i8 signext %tmp2, i16* %ref.tmp) {
 ; CHECK-LABEL: int8_int8_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movb %dil, (%rdx)
 ; CHECK-NEXT:    movb %sil, 1(%rdx)
 ; CHECK-NEXT:    retq
@@ -107,7 +107,7 @@ define void @int8_int8_pair(i8 signext %
 
 define void @int31_int31_pair(i31 %tmp1, i31 %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: int31_int31_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $2147483647, %edi # imm = 0x7FFFFFFF
 ; CHECK-NEXT:    movl %edi, (%rdx)
 ; CHECK-NEXT:    andl $2147483647, %esi # imm = 0x7FFFFFFF
@@ -123,7 +123,7 @@ define void @int31_int31_pair(i31 %tmp1,
 
 define void @int31_int17_pair(i31 %tmp1, i17 %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: int31_int17_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $2147483647, %edi # imm = 0x7FFFFFFF
 ; CHECK-NEXT:    movl %edi, (%rdx)
 ; CHECK-NEXT:    andl $131071, %esi # imm = 0x1FFFF
@@ -139,7 +139,7 @@ define void @int31_int17_pair(i31 %tmp1,
 
 define void @int7_int3_pair(i7 signext %tmp1, i3 signext %tmp2, i16* %ref.tmp) {
 ; CHECK-LABEL: int7_int3_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andb $127, %dil
 ; CHECK-NEXT:    movb %dil, (%rdx)
 ; CHECK-NEXT:    andb $7, %sil
@@ -155,7 +155,7 @@ define void @int7_int3_pair(i7 signext %
 
 define void @int24_int24_pair(i24 signext %tmp1, i24 signext %tmp2, i48* %ref.tmp) {
 ; CHECK-LABEL: int24_int24_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movw %di, (%rdx)
 ; CHECK-NEXT:    shrl $16, %edi
 ; CHECK-NEXT:    movb %dil, 2(%rdx)
@@ -175,7 +175,7 @@ define void @int24_int24_pair(i24 signex
 
 define void @int12_int12_pair(i12 signext %tmp1, i12 signext %tmp2, i24* %ref.tmp) {
 ; CHECK-LABEL: int12_int12_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
 ; CHECK-NEXT:    shll $12, %eax
 ; CHECK-NEXT:    andl $4095, %edi # imm = 0xFFF
@@ -196,7 +196,7 @@ define void @int12_int12_pair(i12 signex
 
 define void @int7_int7_pair(i7 signext %tmp1, i7 signext %tmp2, i14* %ref.tmp) {
 ; CHECK-LABEL: int7_int7_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shll $7, %esi
 ; CHECK-NEXT:    andl $127, %edi
 ; CHECK-NEXT:    orl %esi, %edi
@@ -215,7 +215,7 @@ define void @int7_int7_pair(i7 signext %
 
 define void @int1_int1_pair(i1 signext %tmp1, i1 signext %tmp2, i2* %ref.tmp) {
 ; CHECK-LABEL: int1_int1_pair:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addb %sil, %sil
 ; CHECK-NEXT:    andb $1, %dil
 ; CHECK-NEXT:    orb %sil, %dil
@@ -232,7 +232,7 @@ define void @int1_int1_pair(i1 signext %
 
 define void @mbb_int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
 ; CHECK-LABEL: mbb_int32_float_pair:
-; CHECK:       # BB#0: # %next
+; CHECK:       # %bb.0: # %next
 ; CHECK-NEXT:    movl %edi, (%rsi)
 ; CHECK-NEXT:    movss %xmm0, 4(%rsi)
 ; CHECK-NEXT:    retq
@@ -250,12 +250,12 @@ next:
 
 define void @mbb_int32_float_multi_stores(i32 %tmp1, float %tmp2, i64* %ref.tmp, i64* %ref.tmp1, i1 %cmp) {
 ; CHECK-LABEL: mbb_int32_float_multi_stores:
-; CHECK:       # BB#0: # %bb1
+; CHECK:       # %bb.0: # %bb1
 ; CHECK-NEXT:    movl %edi, (%rsi)
 ; CHECK-NEXT:    movss %xmm0, 4(%rsi)
 ; CHECK-NEXT:    testb $1, %cl
 ; CHECK-NEXT:    je .LBB15_2
-; CHECK-NEXT:  # BB#1: # %bb2
+; CHECK-NEXT:  # %bb.1: # %bb2
 ; CHECK-NEXT:    movl %edi, (%rdx)
 ; CHECK-NEXT:    movss %xmm0, 4(%rdx)
 ; CHECK-NEXT:  .LBB15_2: # %exitbb

Modified: llvm/trunk/test/CodeGen/X86/sqrt-fastmath-tune.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sqrt-fastmath-tune.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sqrt-fastmath-tune.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sqrt-fastmath-tune.ll Mon Dec  4 09:18:51 2017
@@ -12,12 +12,12 @@ declare <8 x float> @llvm.sqrt.v8f32(<8
 
 define float @foo_x1(float %f) #0 {
 ; SCALAR-EST-LABEL: foo_x1:
-; SCALAR-EST:       # BB#0:
+; SCALAR-EST:       # %bb.0:
 ; SCALAR-EST-NEXT:    rsqrtss %xmm0
 ; SCALAR-EST:         retq
 ;
 ; SCALAR-ACC-LABEL: foo_x1:
-; SCALAR-ACC:       # BB#0:
+; SCALAR-ACC:       # %bb.0:
 ; SCALAR-ACC-NEXT:    {{^ *v?sqrtss %xmm0}}
 ; SCALAR-ACC-NEXT:    retq
   %call = tail call float @llvm.sqrt.f32(float %f) #1
@@ -26,12 +26,12 @@ define float @foo_x1(float %f) #0 {
 
 define <4 x float> @foo_x4(<4 x float> %f) #0 {
 ; VECTOR-EST-LABEL: foo_x4:
-; VECTOR-EST:       # BB#0:
+; VECTOR-EST:       # %bb.0:
 ; VECTOR-EST-NEXT:    rsqrtps %xmm0
 ; VECTOR-EST:         retq
 ;
 ; VECTOR-ACC-LABEL: foo_x4:
-; VECTOR-ACC:       # BB#0:
+; VECTOR-ACC:       # %bb.0:
 ; VECTOR-ACC-NEXT:    {{^ *v?sqrtps %xmm0}}
 ; VECTOR-ACC-NEXT:    retq
   %call = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %f) #1
@@ -40,12 +40,12 @@ define <4 x float> @foo_x4(<4 x float> %
 
 define <8 x float> @foo_x8(<8 x float> %f) #0 {
 ; VECTOR-EST-LABEL: foo_x8:
-; VECTOR-EST:       # BB#0:
+; VECTOR-EST:       # %bb.0:
 ; VECTOR-EST-NEXT:    rsqrtps
 ; VECTOR-EST:         retq
 ;
 ; VECTOR-ACC-LABEL: foo_x8:
-; VECTOR-ACC:       # BB#0:
+; VECTOR-ACC:       # %bb.0:
 ; VECTOR-ACC-NEXT:    {{^ *v?sqrtps %[xy]mm0}}
 ; VECTOR-ACC-NOT:     rsqrt
 ; VECTOR-ACC:         retq

Modified: llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sqrt-fastmath.ll Mon Dec  4 09:18:51 2017
@@ -12,12 +12,12 @@ declare <8 x float> @llvm.sqrt.v8f32(<8
 
 define double @finite_f64_no_estimate(double %d) #0 {
 ; SSE-LABEL: finite_f64_no_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: finite_f64_no_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %call = tail call double @__sqrt_finite(double %d) #2
@@ -28,12 +28,12 @@ define double @finite_f64_no_estimate(do
 
 define double @finite_f64_estimate(double %d) #1 {
 ; SSE-LABEL: finite_f64_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: finite_f64_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %call = tail call double @__sqrt_finite(double %d) #2
@@ -42,12 +42,12 @@ define double @finite_f64_estimate(doubl
 
 define float @finite_f32_no_estimate(float %f) #0 {
 ; SSE-LABEL: finite_f32_no_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: finite_f32_no_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %call = tail call float @__sqrtf_finite(float %f) #2
@@ -56,7 +56,7 @@ define float @finite_f32_no_estimate(flo
 
 define float @finite_f32_estimate(float %f) #1 {
 ; SSE-LABEL: finite_f32_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    mulss %xmm1, %xmm2
@@ -71,7 +71,7 @@ define float @finite_f32_estimate(float
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: finite_f32_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm1
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm2
 ; AVX-NEXT:    vmulss %xmm1, %xmm2, %xmm1
@@ -88,7 +88,7 @@ define float @finite_f32_estimate(float
 
 define x86_fp80 @finite_f80_no_estimate(x86_fp80 %ld) #0 {
 ; CHECK-LABEL: finite_f80_no_estimate:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    fsqrt
 ; CHECK-NEXT:    retq
@@ -100,7 +100,7 @@ define x86_fp80 @finite_f80_no_estimate(
 
 define x86_fp80 @finite_f80_estimate_but_no(x86_fp80 %ld) #1 {
 ; CHECK-LABEL: finite_f80_estimate_but_no:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    fsqrt
 ; CHECK-NEXT:    retq
@@ -110,14 +110,14 @@ define x86_fp80 @finite_f80_estimate_but
 
 define float @f32_no_estimate(float %x) #0 {
 ; SSE-LABEL: f32_no_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtss %xmm0, %xmm1
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: f32_no_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vdivss %xmm0, %xmm1, %xmm0
@@ -129,7 +129,7 @@ define float @f32_no_estimate(float %x)
 
 define float @f32_estimate(float %x) #1 {
 ; SSE-LABEL: f32_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    mulss %xmm2, %xmm2
@@ -141,7 +141,7 @@ define float @f32_estimate(float %x) #1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: f32_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm1
 ; AVX-NEXT:    vmulss %xmm1, %xmm1, %xmm2
 ; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
@@ -156,14 +156,14 @@ define float @f32_estimate(float %x) #1
 
 define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
 ; SSE-LABEL: v4f32_no_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtps %xmm0, %xmm1
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    divps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: v4f32_no_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtps %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; AVX-NEXT:    vdivps %xmm0, %xmm1, %xmm0
@@ -175,7 +175,7 @@ define <4 x float> @v4f32_no_estimate(<4
 
 define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
 ; SSE-LABEL: v4f32_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtps %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    mulps %xmm2, %xmm2
@@ -187,7 +187,7 @@ define <4 x float> @v4f32_estimate(<4 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: v4f32_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtps %xmm0, %xmm1
 ; AVX-NEXT:    vmulps %xmm1, %xmm1, %xmm2
 ; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm0
@@ -202,7 +202,7 @@ define <4 x float> @v4f32_estimate(<4 x
 
 define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
 ; SSE-LABEL: v8f32_no_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtps %xmm1, %xmm2
 ; SSE-NEXT:    sqrtps %xmm0, %xmm3
 ; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
@@ -212,7 +212,7 @@ define <8 x float> @v8f32_no_estimate(<8
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: v8f32_no_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtps %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; AVX-NEXT:    vdivps %ymm0, %ymm1, %ymm0
@@ -224,7 +224,7 @@ define <8 x float> @v8f32_no_estimate(<8
 
 define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
 ; SSE-LABEL: v8f32_estimate:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtps %xmm0, %xmm3
 ; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
 ; SSE-NEXT:    movaps %xmm3, %xmm2
@@ -246,7 +246,7 @@ define <8 x float> @v8f32_estimate(<8 x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: v8f32_estimate:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtps %ymm0, %ymm1
 ; AVX-NEXT:    vmulps %ymm1, %ymm1, %ymm2
 ; AVX-NEXT:    vmulps %ymm2, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/sqrt-partial.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sqrt-partial.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sqrt-partial.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sqrt-partial.ll Mon Dec  4 09:18:51 2017
@@ -10,11 +10,11 @@
 
 define float @f(float %val) nounwind {
 ; CHECK-LABEL: f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm1, %xmm1
 ; CHECK-NEXT:    ucomiss %xmm1, %xmm0
 ; CHECK-NEXT:    jb .LBB0_2
-; CHECK-NEXT:  # BB#1: # %.split
+; CHECK-NEXT:  # %bb.1: # %.split
 ; CHECK-NEXT:    sqrtss %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB0_2: # %call.sqrt
@@ -25,11 +25,11 @@ define float @f(float %val) nounwind {
 
 define double @d(double %val) nounwind {
 ; CHECK-LABEL: d:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm1, %xmm1
 ; CHECK-NEXT:    ucomisd %xmm1, %xmm0
 ; CHECK-NEXT:    jb .LBB1_2
-; CHECK-NEXT:  # BB#1: # %.split
+; CHECK-NEXT:  # %bb.1: # %.split
 ; CHECK-NEXT:    sqrtsd %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB1_2: # %call.sqrt

Modified: llvm/trunk/test/CodeGen/X86/sse-align-12.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-align-12.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-align-12.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-align-12.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x float> @a(<4 x float>* %y) nounwind {
 ; CHECK-LABEL: a:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movups (%rdi), %xmm0
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; CHECK-NEXT:    retq
@@ -21,7 +21,7 @@ define <4 x float> @a(<4 x float>* %y) n
 
 define <4 x float> @b(<4 x float>* %y, <4 x float> %z) nounwind {
 ; CHECK-LABEL: b:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movups (%rdi), %xmm1
 ; CHECK-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; CHECK-NEXT:    retq
@@ -39,7 +39,7 @@ define <4 x float> @b(<4 x float>* %y, <
 
 define <2 x double> @c(<2 x double>* %y) nounwind {
 ; CHECK-LABEL: c:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movupd (%rdi), %xmm0
 ; CHECK-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
 ; CHECK-NEXT:    retq
@@ -53,7 +53,7 @@ define <2 x double> @c(<2 x double>* %y)
 
 define <2 x double> @d(<2 x double>* %y, <2 x double> %z) nounwind {
 ; CHECK-LABEL: d:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movups (%rdi), %xmm1
 ; CHECK-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/sse-fcopysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-fcopysign.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-fcopysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-fcopysign.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define float @tst1(float %a, float %b) nounwind {
 ; X32-LABEL: tst1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $8, %esp
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -19,7 +19,7 @@ define float @tst1(float %a, float %b) n
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: tst1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, %xmm2
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    movaps %xmm2, %xmm1
@@ -30,7 +30,7 @@ define float @tst1(float %a, float %b) n
 
 define double @tst2(double %a, float %b, float %c) nounwind {
 ; X32-LABEL: tst2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $16, %esp
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -43,7 +43,7 @@ define double @tst2(double %a, float %b,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: tst2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addss %xmm2, %xmm1
 ; X64-NEXT:    cvtss2sd %xmm1, %xmm1
 ; X64-NEXT:    jmp copysign # TAILCALL
@@ -62,7 +62,7 @@ declare double @copysign(double, double)
 
 define float @int1(float %a, float %b) nounwind {
 ; X32-LABEL: int1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    andps {{\.LCPI.*}}, %xmm0
@@ -75,7 +75,7 @@ define float @int1(float %a, float %b) n
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: int1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64-NEXT:    andps {{.*}}(%rip), %xmm1
 ; X64-NEXT:    orps %xmm1, %xmm0
@@ -86,7 +86,7 @@ define float @int1(float %a, float %b) n
 
 define double @int2(double %a, float %b, float %c) nounwind {
 ; X32-LABEL: int2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-8, %esp
@@ -105,7 +105,7 @@ define double @int2(double %a, float %b,
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: int2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addss %xmm2, %xmm1
 ; X64-NEXT:    cvtss2sd %xmm1, %xmm1
 ; X64-NEXT:    andps {{.*}}(%rip), %xmm1
@@ -120,13 +120,13 @@ define double @int2(double %a, float %b,
 
 define float @cst1() nounwind {
 ; X32-LABEL: cst1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    fld1
 ; X32-NEXT:    fchs
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cst1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    retq
   %tmp = tail call float @llvm.copysign.f32( float 1.0, float -2.0 )
@@ -135,13 +135,13 @@ define float @cst1() nounwind {
 
 define double @cst2() nounwind {
 ; X32-LABEL: cst2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    fldz
 ; X32-NEXT:    fchs
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: cst2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    retq
   %tmp1 = fadd float -1.0, -1.0
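
The int1/int2 checks above show the bitwise lowering of copysign that the
paired andps/orps against constant-pool masks implement: clear the sign bit of
the magnitude operand, isolate the sign bit of the sign operand, and OR the
two. A scalar sketch of the same trick (copysign_bits is a hypothetical name;
assumes C++20 std::bit_cast and IEEE-754 binary32 floats):

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    static float copysign_bits(float mag, float sgn) {
      const std::uint32_t m = std::bit_cast<std::uint32_t>(mag) & 0x7fffffffu; // clear sign
      const std::uint32_t s = std::bit_cast<std::uint32_t>(sgn) & 0x80000000u; // keep sign
      return std::bit_cast<float>(m | s);
    }

    int main() {
      std::printf("%f\n", copysign_bits(1.0f, -2.0f)); // -1.000000, cf. @cst1 above
      return 0;
    }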

Modified: llvm/trunk/test/CodeGen/X86/sse-fsignum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-fsignum.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-fsignum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-fsignum.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@
 
 define void @signum32a(<4 x float>*) {
 ; AVX-LABEL: signum32a:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovaps (%rdi), %xmm0
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpltps %xmm1, %xmm0, %xmm2
@@ -34,7 +34,7 @@ entry:
 
 define void @signum64a(<2 x double>*) {
 ; AVX-LABEL: signum64a:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovapd (%rdi), %xmm0
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpltpd %xmm1, %xmm0, %xmm2
@@ -63,7 +63,7 @@ entry:
 
 define void @signum32b(<8 x float>*) {
 ; AVX1-LABEL: signum32b:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vcmpltps %ymm1, %ymm0, %ymm2
@@ -76,7 +76,7 @@ define void @signum32b(<8 x float>*) {
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: signum32b:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vcmpltps %ymm1, %ymm0, %ymm2
@@ -89,7 +89,7 @@ define void @signum32b(<8 x float>*) {
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: signum32b:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vcmpltps %zmm1, %zmm0, %k1
@@ -117,7 +117,7 @@ entry:
 
 define void @signum64b(<4 x double>*) {
 ; AVX1-LABEL: signum64b:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
@@ -134,7 +134,7 @@ define void @signum64b(<4 x double>*) {
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: signum64b:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
@@ -151,7 +151,7 @@ define void @signum64b(<4 x double>*) {
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: signum64b:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
@@ -181,7 +181,7 @@ entry:
 
 define void @signum32c(<8 x float>*) {
 ; AVX-LABEL: signum32c:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpltps %ymm1, %ymm0, %ymm2
@@ -207,7 +207,7 @@ entry:
 
 define void @signum64c(<4 x double>*) {
 ; AVX1-LABEL: signum64c:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
@@ -223,7 +223,7 @@ define void @signum64c(<4 x double>*) {
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: signum64c:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
@@ -237,7 +237,7 @@ define void @signum64c(<4 x double>*) {
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: signum64c:
-; AVX512F:       # BB#0: # %entry
+; AVX512F:       # %bb.0: # %entry
 ; AVX512F-NEXT:    vmovapd (%rdi), %ymm0
 ; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
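
The signum tests above build their result out of cmpltps/cmpltpd masks against
zero; the scalar identity being vectorized is signum(x) = (x > 0) - (x < 0).
A minimal scalar sketch (signum_scalar is a hypothetical name, not taken from
the test file):

    #include <cstdio>

    static float signum_scalar(float x) {
      // Each comparison contributes 0 or 1; the difference is -1, 0, or +1.
      return static_cast<float>(x > 0.0f) - static_cast<float>(x < 0.0f);
    }

    int main() {
      std::printf("%g %g %g\n", signum_scalar(-3.5f), signum_scalar(0.0f),
                  signum_scalar(2.0f)); // -1 0 1
      return 0;
    }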

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define <4 x float> @test_mm_cvtsi64_ss(<4 x float> %a0, i64 %a1) nounwind {
 ; X64-LABEL: test_mm_cvtsi64_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsi2ssq %rdi, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1)
@@ -15,7 +15,7 @@ declare <4 x float> @llvm.x86.sse.cvtsi6
 
 define i64 @test_mm_cvtss_si64(<4 x float> %a0) nounwind {
 ; X64-LABEL: test_mm_cvtss_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtss2si %xmm0, %rax
 ; X64-NEXT:    retq
   %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)
@@ -25,7 +25,7 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4
 
 define i64 @test_mm_cvttss_si64(<4 x float> %a0) nounwind {
 ; X64-LABEL: test_mm_cvttss_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttss2si %xmm0, %rax
 ; X64-NEXT:    retq
   %res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)
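
These fast-isel tests pin each _mm_* intrinsic to the instruction it should
select to; the three x86_64-only cases above cover cvtsi2ssq, cvtss2si, and
cvttss2si. A usage sketch of the two conversion intrinsics (assumes an x86-64
SSE target; the first result depends on the MXCSR rounding mode,
round-to-nearest by default):

    #include <xmmintrin.h>
    #include <cstdio>

    int main() {
      __m128 v = _mm_set_ss(42.7f);
      long long rounded   = _mm_cvtss_si64(v);  // cvtss2si, rounds per MXCSR
      long long truncated = _mm_cvttss_si64(v); // cvttss2si, truncates toward zero
      std::printf("%lld %lld\n", rounded, truncated); // 43 42
      return 0;
    }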

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <4 x float> @test_mm_add_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_add_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    addps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fadd <4 x float> %a0, %a1
@@ -20,12 +20,12 @@ define <4 x float> @test_mm_add_ps(<4 x
 
 define <4 x float> @test_mm_add_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_add_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    addss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <4 x float> %a0, i32 0
@@ -37,12 +37,12 @@ define <4 x float> @test_mm_add_ss(<4 x
 
 define <4 x float> @test_mm_and_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_and_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    andps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_and_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <4 x float> %a0 to <4 x i32>
@@ -54,12 +54,12 @@ define <4 x float> @test_mm_and_ps(<4 x
 
 define <4 x float> @test_mm_andnot_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_andnot_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    andnps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_andnot_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andnps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <4 x float> %a0 to <4 x i32>
@@ -72,12 +72,12 @@ define <4 x float> @test_mm_andnot_ps(<4
 
 define <4 x float> @test_mm_cmpeq_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpeqps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpeqps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp oeq <4 x float> %a0, %a1
@@ -88,12 +88,12 @@ define <4 x float> @test_mm_cmpeq_ps(<4
 
 define <4 x float> @test_mm_cmpeq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpeqss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpeqss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 0)
@@ -103,13 +103,13 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
 
 define <4 x float> @test_mm_cmpge_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpge_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpleps %xmm0, %xmm1
 ; X32-NEXT:    movaps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpge_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpleps %xmm0, %xmm1
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -121,13 +121,13 @@ define <4 x float> @test_mm_cmpge_ps(<4
 
 define <4 x float> @test_mm_cmpge_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpge_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpless %xmm0, %xmm1
 ; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpge_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpless %xmm0, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
@@ -138,13 +138,13 @@ define <4 x float> @test_mm_cmpge_ss(<4
 
 define <4 x float> @test_mm_cmpgt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltps %xmm0, %xmm1
 ; X32-NEXT:    movaps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltps %xmm0, %xmm1
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -156,13 +156,13 @@ define <4 x float> @test_mm_cmpgt_ps(<4
 
 define <4 x float> @test_mm_cmpgt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltss %xmm0, %xmm1
 ; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltss %xmm0, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
@@ -173,12 +173,12 @@ define <4 x float> @test_mm_cmpgt_ss(<4
 
 define <4 x float> @test_mm_cmple_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmple_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpleps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmple_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpleps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp ole <4 x float> %a0, %a1
@@ -189,12 +189,12 @@ define <4 x float> @test_mm_cmple_ps(<4
 
 define <4 x float> @test_mm_cmple_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmple_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpless %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmple_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpless %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 2)
@@ -203,12 +203,12 @@ define <4 x float> @test_mm_cmple_ss(<4
 
 define <4 x float> @test_mm_cmplt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp olt <4 x float> %a0, %a1
@@ -219,12 +219,12 @@ define <4 x float> @test_mm_cmplt_ps(<4
 
 define <4 x float> @test_mm_cmplt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 1)
@@ -233,12 +233,12 @@ define <4 x float> @test_mm_cmplt_ss(<4
 
 define <4 x float> @test_mm_cmpneq_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpneq_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpneqps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpneq_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpneqps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp une <4 x float> %a0, %a1
@@ -249,12 +249,12 @@ define <4 x float> @test_mm_cmpneq_ps(<4
 
 define <4 x float> @test_mm_cmpneq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpneq_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpneqss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpneq_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpneqss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 4)
@@ -263,13 +263,13 @@ define <4 x float> @test_mm_cmpneq_ss(<4
 
 define <4 x float> @test_mm_cmpnge_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnge_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnleps %xmm0, %xmm1
 ; X32-NEXT:    movaps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnge_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnleps %xmm0, %xmm1
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -281,13 +281,13 @@ define <4 x float> @test_mm_cmpnge_ps(<4
 
 define <4 x float> @test_mm_cmpnge_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnge_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnless %xmm0, %xmm1
 ; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnge_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnless %xmm0, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
@@ -298,13 +298,13 @@ define <4 x float> @test_mm_cmpnge_ss(<4
 
 define <4 x float> @test_mm_cmpngt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpngt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltps %xmm0, %xmm1
 ; X32-NEXT:    movaps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpngt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltps %xmm0, %xmm1
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -316,13 +316,13 @@ define <4 x float> @test_mm_cmpngt_ps(<4
 
 define <4 x float> @test_mm_cmpngt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpngt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltss %xmm0, %xmm1
 ; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpngt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltss %xmm0, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
@@ -333,12 +333,12 @@ define <4 x float> @test_mm_cmpngt_ss(<4
 
 define <4 x float> @test_mm_cmpnle_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnle_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnleps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnle_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnleps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp ugt <4 x float> %a0, %a1
@@ -349,12 +349,12 @@ define <4 x float> @test_mm_cmpnle_ps(<4
 
 define <4 x float> @test_mm_cmpnle_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnle_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnless %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnle_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnless %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 6)
@@ -363,12 +363,12 @@ define <4 x float> @test_mm_cmpnle_ss(<4
 
 define <4 x float> @test_mm_cmpnlt_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnlt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnlt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp uge <4 x float> %a0, %a1
@@ -379,12 +379,12 @@ define <4 x float> @test_mm_cmpnlt_ps(<4
 
 define <4 x float> @test_mm_cmpnlt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnlt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnlt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 5)
@@ -393,12 +393,12 @@ define <4 x float> @test_mm_cmpnlt_ss(<4
 
 define <4 x float> @test_mm_cmpord_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpord_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpordps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpord_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpordps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp ord <4 x float> %a0, %a1
@@ -409,12 +409,12 @@ define <4 x float> @test_mm_cmpord_ps(<4
 
 define <4 x float> @test_mm_cmpord_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpord_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpordss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpord_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpordss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7)
@@ -423,12 +423,12 @@ define <4 x float> @test_mm_cmpord_ss(<4
 
 define <4 x float> @test_mm_cmpunord_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpunord_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpunordps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpunord_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpunordps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp uno <4 x float> %a0, %a1
@@ -439,12 +439,12 @@ define <4 x float> @test_mm_cmpunord_ps(
 
 define <4 x float> @test_mm_cmpunord_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpunord_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpunordss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpunord_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpunordss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 3)
@@ -453,7 +453,7 @@ define <4 x float> @test_mm_cmpunord_ss(
 
 define i32 @test_mm_comieq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_comieq_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    comiss %xmm1, %xmm0
 ; X32-NEXT:    setnp %al
 ; X32-NEXT:    sete %cl
@@ -462,7 +462,7 @@ define i32 @test_mm_comieq_ss(<4 x float
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comieq_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    comiss %xmm1, %xmm0
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    sete %cl
@@ -476,14 +476,14 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x
 
 define i32 @test_mm_comige_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_comige_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comiss %xmm1, %xmm0
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comige_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comiss %xmm1, %xmm0
 ; X64-NEXT:    setae %al
@@ -495,14 +495,14 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x
 
 define i32 @test_mm_comigt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_comigt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comiss %xmm1, %xmm0
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comigt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comiss %xmm1, %xmm0
 ; X64-NEXT:    seta %al
@@ -514,14 +514,14 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x
 
 define i32 @test_mm_comile_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_comile_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comiss %xmm0, %xmm1
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comile_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comiss %xmm0, %xmm1
 ; X64-NEXT:    setae %al
@@ -533,14 +533,14 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x
 
 define i32 @test_mm_comilt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_comilt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comiss %xmm0, %xmm1
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comilt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comiss %xmm0, %xmm1
 ; X64-NEXT:    seta %al
@@ -552,7 +552,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x
 
 define i32 @test_mm_comineq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_comineq_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    comiss %xmm1, %xmm0
 ; X32-NEXT:    setp %al
 ; X32-NEXT:    setne %cl
@@ -561,7 +561,7 @@ define i32 @test_mm_comineq_ss(<4 x floa
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comineq_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    comiss %xmm1, %xmm0
 ; X64-NEXT:    setp %al
 ; X64-NEXT:    setne %cl
@@ -575,12 +575,12 @@ declare i32 @llvm.x86.sse.comineq.ss(<4
 
 define i32 @test_mm_cvt_ss2si(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvt_ss2si:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtss2si %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvt_ss2si:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtss2si %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
@@ -590,12 +590,12 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x
 
 define <4 x float> @test_mm_cvtsi32_ss(<4 x float> %a0, i32 %a1) nounwind {
 ; X32-LABEL: test_mm_cvtsi32_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsi32_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsi2ssl %edi, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a0, i32 %a1)
@@ -605,7 +605,7 @@ declare <4 x float> @llvm.x86.sse.cvtsi2
 
 define float @test_mm_cvtss_f32(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtss_f32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movss %xmm0, (%esp)
 ; X32-NEXT:    flds (%esp)
@@ -613,7 +613,7 @@ define float @test_mm_cvtss_f32(<4 x flo
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtss_f32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = extractelement <4 x float> %a0, i32 0
   ret float %res
@@ -621,12 +621,12 @@ define float @test_mm_cvtss_f32(<4 x flo
 
 define i32 @test_mm_cvtss_si32(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtss_si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtss2si %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtss_si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtss2si %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
@@ -635,12 +635,12 @@ define i32 @test_mm_cvtss_si32(<4 x floa
 
 define i32 @test_mm_cvttss_si(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvttss_si:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvttss2si %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvttss_si:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttss2si %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
@@ -650,12 +650,12 @@ declare i32 @llvm.x86.sse.cvttss2si(<4 x
 
 define i32 @test_mm_cvttss_si32(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvttss_si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvttss2si %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvttss_si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttss2si %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
@@ -664,12 +664,12 @@ define i32 @test_mm_cvttss_si32(<4 x flo
 
 define <4 x float> @test_mm_div_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_div_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    divps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_div_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    divps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fdiv <4 x float> %a0, %a1
@@ -678,12 +678,12 @@ define <4 x float> @test_mm_div_ps(<4 x
 
 define <4 x float> @test_mm_div_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_div_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    divss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_div_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    divss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <4 x float> %a0, i32 0
@@ -695,7 +695,7 @@ define <4 x float> @test_mm_div_ss(<4 x
 
 define i32 @test_MM_GET_EXCEPTION_MASK() nounwind {
 ; X32-LABEL: test_MM_GET_EXCEPTION_MASK:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl %esp, %eax
 ; X32-NEXT:    stmxcsr (%eax)
@@ -705,7 +705,7 @@ define i32 @test_MM_GET_EXCEPTION_MASK()
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_GET_EXCEPTION_MASK:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
@@ -722,7 +722,7 @@ declare void @llvm.x86.sse.stmxcsr(i8*)
 
 define i32 @test_MM_GET_EXCEPTION_STATE() nounwind {
 ; X32-LABEL: test_MM_GET_EXCEPTION_STATE:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl %esp, %eax
 ; X32-NEXT:    stmxcsr (%eax)
@@ -732,7 +732,7 @@ define i32 @test_MM_GET_EXCEPTION_STATE(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_GET_EXCEPTION_STATE:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
@@ -748,7 +748,7 @@ define i32 @test_MM_GET_EXCEPTION_STATE(
 
 define i32 @test_MM_GET_FLUSH_ZERO_MODE() nounwind {
 ; X32-LABEL: test_MM_GET_FLUSH_ZERO_MODE:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl %esp, %eax
 ; X32-NEXT:    stmxcsr (%eax)
@@ -758,7 +758,7 @@ define i32 @test_MM_GET_FLUSH_ZERO_MODE(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_GET_FLUSH_ZERO_MODE:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
@@ -774,7 +774,7 @@ define i32 @test_MM_GET_FLUSH_ZERO_MODE(
 
 define i32 @test_MM_GET_ROUNDING_MODE() nounwind {
 ; X32-LABEL: test_MM_GET_ROUNDING_MODE:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl %esp, %eax
 ; X32-NEXT:    stmxcsr (%eax)
@@ -784,7 +784,7 @@ define i32 @test_MM_GET_ROUNDING_MODE()
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_GET_ROUNDING_MODE:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
@@ -800,7 +800,7 @@ define i32 @test_MM_GET_ROUNDING_MODE()
 
 define i32 @test_mm_getcsr() nounwind {
 ; X32-LABEL: test_mm_getcsr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl %esp, %eax
 ; X32-NEXT:    stmxcsr (%eax)
@@ -809,7 +809,7 @@ define i32 @test_mm_getcsr() nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_getcsr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
@@ -823,13 +823,13 @@ define i32 @test_mm_getcsr() nounwind {
 
 define <4 x float> @test_mm_load_ps(float* %a0) nounwind {
 ; X32-LABEL: test_mm_load_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps (%rdi), %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <4 x float>*
@@ -839,14 +839,14 @@ define <4 x float> @test_mm_load_ps(floa
 
 define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
 ; X32-LABEL: test_mm_load_ps1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load_ps1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
@@ -860,13 +860,13 @@ define <4 x float> @test_mm_load_ps1(flo
 
 define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
 ; X32-LABEL: test_mm_load_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    retq
   %ld = load float, float* %a0, align 1
@@ -879,14 +879,14 @@ define <4 x float> @test_mm_load_ss(floa
 
 define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
 ; X32-LABEL: test_mm_load1_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load1_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
@@ -900,7 +900,7 @@ define <4 x float> @test_mm_load1_ps(flo
 
 define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, x86_mmx* %a1) {
 ; X32-LABEL: test_mm_loadh_pi:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -909,7 +909,7 @@ define <4 x float> @test_mm_loadh_pi(<4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadh_pi:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    shrq $32, %rax
@@ -930,7 +930,7 @@ define <4 x float> @test_mm_loadh_pi(<4
 
 define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
 ; X32-LABEL: test_mm_loadl_pi:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -940,7 +940,7 @@ define <4 x float> @test_mm_loadl_pi(<4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadl_pi:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    shrq $32, %rax
@@ -962,14 +962,14 @@ define <4 x float> @test_mm_loadl_pi(<4
 
 define <4 x float> @test_mm_loadr_ps(float* %a0) nounwind {
 ; X32-LABEL: test_mm_loadr_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps (%eax), %xmm0
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadr_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps (%rdi), %xmm0
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; X64-NEXT:    retq
@@ -981,13 +981,13 @@ define <4 x float> @test_mm_loadr_ps(flo
 
 define <4 x float> @test_mm_loadu_ps(float* %a0) nounwind {
 ; X32-LABEL: test_mm_loadu_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadu_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movups (%rdi), %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <4 x float>*
@@ -997,12 +997,12 @@ define <4 x float> @test_mm_loadu_ps(flo
 
 define <4 x float> @test_mm_max_ps(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_max_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    maxps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_max_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    maxps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
@@ -1012,12 +1012,12 @@ declare <4 x float> @llvm.x86.sse.max.ps
 
 define <4 x float> @test_mm_max_ss(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_max_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    maxss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_max_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    maxss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
@@ -1027,12 +1027,12 @@ declare <4 x float> @llvm.x86.sse.max.ss
 
 define <4 x float> @test_mm_min_ps(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_min_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    minps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_min_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    minps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
@@ -1042,12 +1042,12 @@ declare <4 x float> @llvm.x86.sse.min.ps
 
 define <4 x float> @test_mm_min_ss(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_min_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    minss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_min_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    minss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
@@ -1057,12 +1057,12 @@ declare <4 x float> @llvm.x86.sse.min.ss
 
 define <4 x float> @test_mm_move_ss(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_move_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_move_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -1071,12 +1071,12 @@ define <4 x float> @test_mm_move_ss(<4 x
 
 define <4 x float> @test_mm_movehl_ps(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_movehl_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_movehl_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -1085,12 +1085,12 @@ define <4 x float> @test_mm_movehl_ps(<4
 
 define <4 x float> @test_mm_movelh_ps(<4 x float> %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_movelh_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_movelh_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1099,12 +1099,12 @@ define <4 x float> @test_mm_movelh_ps(<4
 
 define i32 @test_mm_movemask_ps(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_movemask_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movmskps %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_movemask_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movmskps %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
@@ -1114,12 +1114,12 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x
 
 define <4 x float> @test_mm_mul_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_mul_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    mulps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mul_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mulps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fmul <4 x float> %a0, %a1
@@ -1128,12 +1128,12 @@ define <4 x float> @test_mm_mul_ps(<4 x
 
 define <4 x float> @test_mm_mul_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_mul_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    mulss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mul_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mulss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <4 x float> %a0, i32 0
@@ -1145,12 +1145,12 @@ define <4 x float> @test_mm_mul_ss(<4 x
 
 define <4 x float> @test_mm_or_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_or_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    orps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_or_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <4 x float> %a0 to <4 x i32>
@@ -1162,13 +1162,13 @@ define <4 x float> @test_mm_or_ps(<4 x f
 
 define void @test_mm_prefetch(i8* %a0) {
 ; X32-LABEL: test_mm_prefetch:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    prefetchnta (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_prefetch:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    prefetchnta (%rdi)
 ; X64-NEXT:    retq
   call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
@@ -1178,12 +1178,12 @@ declare void @llvm.prefetch(i8* nocaptur
 
 define <4 x float> @test_mm_rcp_ps(<4 x float> %a0) {
 ; X32-LABEL: test_mm_rcp_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    rcpps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_rcp_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    rcpps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
@@ -1193,12 +1193,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ps
 
 define <4 x float> @test_mm_rcp_ss(<4 x float> %a0) {
 ; X32-LABEL: test_mm_rcp_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    rcpss %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_rcp_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    rcpss %xmm0, %xmm0
 ; X64-NEXT:    retq
   %rcp = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0)
@@ -1216,12 +1216,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ss
 
 define <4 x float> @test_mm_rsqrt_ps(<4 x float> %a0) {
 ; X32-LABEL: test_mm_rsqrt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    rsqrtps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_rsqrt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    rsqrtps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
@@ -1231,12 +1231,12 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
 
 define <4 x float> @test_mm_rsqrt_ss(<4 x float> %a0) {
 ; X32-LABEL: test_mm_rsqrt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    rsqrtss %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_rsqrt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    rsqrtss %xmm0, %xmm0
 ; X64-NEXT:    retq
   %rsqrt = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0)
@@ -1254,7 +1254,7 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
 
 define void @test_MM_SET_EXCEPTION_MASK(i32 %a0) nounwind {
 ; X32-LABEL: test_MM_SET_EXCEPTION_MASK:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %esp, %ecx
@@ -1268,7 +1268,7 @@ define void @test_MM_SET_EXCEPTION_MASK(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_SET_EXCEPTION_MASK:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
@@ -1291,7 +1291,7 @@ declare void @llvm.x86.sse.ldmxcsr(i8*)
 
 define void @test_MM_SET_EXCEPTION_STATE(i32 %a0) nounwind {
 ; X32-LABEL: test_MM_SET_EXCEPTION_STATE:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %esp, %ecx
@@ -1305,7 +1305,7 @@ define void @test_MM_SET_EXCEPTION_STATE
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_SET_EXCEPTION_STATE:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
@@ -1327,7 +1327,7 @@ define void @test_MM_SET_EXCEPTION_STATE
 
 define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
 ; X32-LABEL: test_MM_SET_FLUSH_ZERO_MODE:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %esp, %ecx
@@ -1341,7 +1341,7 @@ define void @test_MM_SET_FLUSH_ZERO_MODE
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_SET_FLUSH_ZERO_MODE:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
@@ -1363,7 +1363,7 @@ define void @test_MM_SET_FLUSH_ZERO_MODE
 
 define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
 ; X32-LABEL: test_mm_set_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -1374,7 +1374,7 @@ define <4 x float> @test_mm_set_ps(float
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; X64-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
 ; X64-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0]
@@ -1389,13 +1389,13 @@ define <4 x float> @test_mm_set_ps(float
 
 define <4 x float> @test_mm_set_ps1(float %a0) nounwind {
 ; X32-LABEL: test_mm_set_ps1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_ps1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
   %res0  = insertelement <4 x float> undef, float %a0, i32 0
@@ -1407,7 +1407,7 @@ define <4 x float> @test_mm_set_ps1(floa
 
 define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
 ; X32-LABEL: test_MM_SET_ROUNDING_MODE:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl %esp, %ecx
@@ -1421,7 +1421,7 @@ define void @test_MM_SET_ROUNDING_MODE(i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_SET_ROUNDING_MODE:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    stmxcsr (%rax)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
@@ -1443,14 +1443,14 @@ define void @test_MM_SET_ROUNDING_MODE(i
 
 define <4 x float> @test_mm_set_ss(float %a0) nounwind {
 ; X32-LABEL: test_mm_set_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    xorps %xmm0, %xmm0
 ; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm1, %xmm1
 ; X64-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
 ; X64-NEXT:    movaps %xmm1, %xmm0
@@ -1464,13 +1464,13 @@ define <4 x float> @test_mm_set_ss(float
 
 define <4 x float> @test_mm_set1_ps(float %a0) nounwind {
 ; X32-LABEL: test_mm_set1_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set1_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
   %res0  = insertelement <4 x float> undef, float %a0, i32 0
@@ -1482,13 +1482,13 @@ define <4 x float> @test_mm_set1_ps(floa
 
 define void @test_mm_setcsr(i32 %a0) nounwind {
 ; X32-LABEL: test_mm_setcsr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    ldmxcsr (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setcsr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    movl %edi, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    ldmxcsr (%rax)
@@ -1502,7 +1502,7 @@ define void @test_mm_setcsr(i32 %a0) nou
 
 define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
 ; X32-LABEL: test_mm_setr_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
@@ -1513,7 +1513,7 @@ define <4 x float> @test_mm_setr_ps(floa
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setr_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -1527,12 +1527,12 @@ define <4 x float> @test_mm_setr_ps(floa
 
 define <4 x float> @test_mm_setzero_ps() {
 ; X32-LABEL: test_mm_setzero_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setzero_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    retq
   ret <4 x float> zeroinitializer
@@ -1540,12 +1540,12 @@ define <4 x float> @test_mm_setzero_ps()
 
 define void @test_mm_sfence() nounwind {
 ; X32-LABEL: test_mm_sfence:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    sfence
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sfence:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sfence
 ; X64-NEXT:    retq
   call void @llvm.x86.sse.sfence()
@@ -1555,12 +1555,12 @@ declare void @llvm.x86.sse.sfence() noun
 
 define <4 x float> @test_mm_shuffle_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_shuffle_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_shuffle_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
@@ -1569,12 +1569,12 @@ define <4 x float> @test_mm_shuffle_ps(<
 
 define <4 x float> @test_mm_sqrt_ps(<4 x float> %a0) {
 ; X32-LABEL: test_mm_sqrt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    sqrtps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sqrt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
@@ -1584,12 +1584,12 @@ declare <4 x float> @llvm.x86.sse.sqrt.p
 
 define <4 x float> @test_mm_sqrt_ss(<4 x float> %a0) {
 ; X32-LABEL: test_mm_sqrt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    sqrtss %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sqrt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtss %xmm0, %xmm0
 ; X64-NEXT:    retq
   %sqrt = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
@@ -1607,13 +1607,13 @@ declare <4 x float> @llvm.x86.sse.sqrt.s
 
 define void @test_mm_store_ps(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_store_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <4 x float>*
@@ -1623,14 +1623,14 @@ define void @test_mm_store_ps(float *%a0
 
 define void @test_mm_store_ps1(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_store_ps1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_ps1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -1642,13 +1642,13 @@ define void @test_mm_store_ps1(float *%a
 
 define void @test_mm_store_ss(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_store_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movss %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %ext = extractelement <4 x float> %a1, i32 0
@@ -1658,14 +1658,14 @@ define void @test_mm_store_ss(float *%a0
 
 define void @test_mm_store1_ps(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_store1_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store1_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -1677,7 +1677,7 @@ define void @test_mm_store1_ps(float *%a
 
 define void @test_mm_storeh_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_storeh_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
@@ -1693,7 +1693,7 @@ define void @test_mm_storeh_ps(x86_mmx *
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storeh_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    movq %rax, (%rdi)
@@ -1707,7 +1707,7 @@ define void @test_mm_storeh_ps(x86_mmx *
 
 define void @test_mm_storel_ps(x86_mmx *%a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_storel_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
@@ -1723,7 +1723,7 @@ define void @test_mm_storel_ps(x86_mmx *
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storel_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    movq %rax, (%rdi)
@@ -1737,14 +1737,14 @@ define void @test_mm_storel_ps(x86_mmx *
 
 define void @test_mm_storer_ps(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_storer_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storer_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -1756,13 +1756,13 @@ define void @test_mm_storer_ps(float *%a
 
 define void @test_mm_storeu_ps(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_storeu_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storeu_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movups %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <4 x float>*
@@ -1772,13 +1772,13 @@ define void @test_mm_storeu_ps(float *%a
 
 define void @test_mm_stream_ps(float *%a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm_stream_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movntps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_stream_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movntps %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <4 x float>*
@@ -1788,12 +1788,12 @@ define void @test_mm_stream_ps(float *%a
 
 define <4 x float> @test_mm_sub_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fsub <4 x float> %a0, %a1
@@ -1802,12 +1802,12 @@ define <4 x float> @test_mm_sub_ps(<4 x
 
 define <4 x float> @test_mm_sub_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <4 x float> %a0, i32 0
@@ -1819,7 +1819,7 @@ define <4 x float> @test_mm_sub_ss(<4 x
 
 define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x float>* %a2, <4 x float>* %a3) nounwind {
 ; X32-LABEL: test_MM_TRANSPOSE4_PS:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -1849,7 +1849,7 @@ define void @test_MM_TRANSPOSE4_PS(<4 x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_MM_TRANSPOSE4_PS:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps (%rdi), %xmm0
 ; X64-NEXT:    movaps (%rsi), %xmm1
 ; X64-NEXT:    movaps (%rdx), %xmm2
@@ -1892,7 +1892,7 @@ define void @test_MM_TRANSPOSE4_PS(<4 x
 
 define i32 @test_mm_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomieq_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    ucomiss %xmm1, %xmm0
 ; X32-NEXT:    setnp %al
 ; X32-NEXT:    sete %cl
@@ -1901,7 +1901,7 @@ define i32 @test_mm_ucomieq_ss(<4 x floa
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomieq_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    ucomiss %xmm1, %xmm0
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    sete %cl
@@ -1915,14 +1915,14 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4
 
 define i32 @test_mm_ucomige_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomige_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomiss %xmm1, %xmm0
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomige_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomiss %xmm1, %xmm0
 ; X64-NEXT:    setae %al
@@ -1934,14 +1934,14 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4
 
 define i32 @test_mm_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomigt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomiss %xmm1, %xmm0
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomigt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomiss %xmm1, %xmm0
 ; X64-NEXT:    seta %al
@@ -1953,14 +1953,14 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4
 
 define i32 @test_mm_ucomile_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomile_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomiss %xmm0, %xmm1
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomile_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomiss %xmm0, %xmm1
 ; X64-NEXT:    setae %al
@@ -1972,14 +1972,14 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4
 
 define i32 @test_mm_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomilt_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomiss %xmm0, %xmm1
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomilt_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomiss %xmm0, %xmm1
 ; X64-NEXT:    seta %al
@@ -1991,7 +1991,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4
 
 define i32 @test_mm_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomineq_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    ucomiss %xmm1, %xmm0
 ; X32-NEXT:    setp %al
 ; X32-NEXT:    setne %cl
@@ -2000,7 +2000,7 @@ define i32 @test_mm_ucomineq_ss(<4 x flo
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomineq_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    ucomiss %xmm1, %xmm0
 ; X64-NEXT:    setp %al
 ; X64-NEXT:    setne %cl
@@ -2014,23 +2014,23 @@ declare i32 @llvm.x86.sse.ucomineq.ss(<4
 
 define <4 x float> @test_mm_undefined_ps() {
 ; X32-LABEL: test_mm_undefined_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_undefined_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <4 x float> undef
 }
 
 define <4 x float> @test_mm_unpackhi_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_unpackhi_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpackhi_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -2039,12 +2039,12 @@ define <4 x float> @test_mm_unpackhi_ps(
 
 define <4 x float> @test_mm_unpacklo_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_unpacklo_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpacklo_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -2053,12 +2053,12 @@ define <4 x float> @test_mm_unpacklo_ps(
 
 define <4 x float> @test_mm_xor_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_xor_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_xor_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <4 x float> %a0 to <4 x i32>

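[The hunks in this file are purely mechanical: every "BB#N" assembler comment becomes "%bb.N", while labels such as .LBB0_1 are untouched. A minimal Python sketch of that rewrite follows — a hypothetical helper for illustration only, not part of r319665, which may have used its own script or simply regenerated the checks:

    import re
    import sys

    # Old-style block comments look like "# BB#0:" or "## BB#0:"; the
    # new form is "# %bb.0:".  Labels like ".LBB0_1" contain no '#',
    # so this pattern leaves them alone.
    OLD_MARKER = re.compile(r"BB#(\d+)")

    for path in sys.argv[1:]:
        with open(path) as f:
            text = f.read()
        with open(path, "w") as f:
            f.write(OLD_MARKER.sub(r"%bb.\1", text))

Invoked as, e.g., "python rewrite_bb_markers.py test/CodeGen/X86/*.ll" (script name hypothetical), it performs exactly the comment rewrite seen in the hunks above and below.]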
Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll Mon Dec  4 09:18:51 2017
@@ -3,18 +3,18 @@
 
 define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_storeu_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-NEXT:    movups %xmm0, (%eax)
 ; SSE-NEXT:    retl
 ;
 ; KNL-LABEL: test_x86_sse_storeu_ps:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL-NEXT:    vmovups %xmm0, (%eax)
 ; KNL-NEXT:    retl
 ; CHECK-LABEL: test_x86_sse_storeu_ps:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movups %xmm0, (%eax)
 ; CHECK-NEXT:    retl
@@ -26,21 +26,21 @@ declare void @llvm.x86.sse.storeu.ps(i8*
 
 define <4 x float> @test_x86_sse_add_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_add_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x58,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_add_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vaddss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_add_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vaddss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x58,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse_add_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    addss %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -51,21 +51,21 @@ declare <4 x float> @llvm.x86.sse.add.ss
 
 define <4 x float> @test_x86_sse_sub_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_sub_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    subss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5c,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_sub_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vsubss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5c,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_sub_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vsubss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x5c,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse_sub_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subss %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <4 x float> @llvm.x86.sse.sub.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -76,21 +76,21 @@ declare <4 x float> @llvm.x86.sse.sub.ss
 
 define <4 x float> @test_x86_sse_mul_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_mul_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    mulss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x59,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_mul_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmulss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x59,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_mul_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmulss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x59,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse_mul_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    mulss %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <4 x float> @llvm.x86.sse.mul.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -101,21 +101,21 @@ declare <4 x float> @llvm.x86.sse.mul.ss
 
 define <4 x float> @test_x86_sse_div_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_div_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5e,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_div_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vdivss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5e,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_div_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vdivss %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x5e,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse_div_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    divss %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <4 x float> @llvm.x86.sse.div.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 
 define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_cmp_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpordps %xmm1, %xmm0 ## encoding: [0x0f,0xc2,0xc1,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_cmp_ps:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcmpordps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0xc2,0xc1,0x07]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -21,12 +21,12 @@ declare <4 x float> @llvm.x86.sse.cmp.ps
 
 define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_cmp_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpordss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0xc2,0xc1,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_cmp_ss:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcmpordss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0xc2,0xc1,0x07]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -37,7 +37,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
 
 define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_comieq_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SSE-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -46,7 +46,7 @@ define i32 @test_x86_sse_comieq_ss(<4 x
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_comieq_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
 ; AVX2-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; AVX2-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -55,7 +55,7 @@ define i32 @test_x86_sse_comieq_ss(<4 x
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_comieq_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
 ; SKX-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SKX-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -70,21 +70,21 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x
 
 define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_comige_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_comige_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_comige_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -97,21 +97,21 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x
 
 define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_comigt_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_comigt_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_comigt_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -124,21 +124,21 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x
 
 define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_comile_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comiss %xmm0, %xmm1 ## encoding: [0x0f,0x2f,0xc8]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_comile_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_comile_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -151,21 +151,21 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x
 
 define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_comilt_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comiss %xmm0, %xmm1 ## encoding: [0x0f,0x2f,0xc8]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_comilt_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_comilt_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -178,7 +178,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x
 
 define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_comineq_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    comiss %xmm1, %xmm0 ## encoding: [0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SSE-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -187,7 +187,7 @@ define i32 @test_x86_sse_comineq_ss(<4 x
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_comineq_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
 ; AVX2-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; AVX2-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -196,7 +196,7 @@ define i32 @test_x86_sse_comineq_ss(<4 x
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_comineq_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
 ; SKX-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SKX-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -211,19 +211,19 @@ declare i32 @llvm.x86.sse.comineq.ss(<4
 
 define <4 x float> @test_x86_sse_cvtsi2ss(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_cvtsi2ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
 ; SSE-NEXT:    cvtsi2ssl %eax, %xmm0 ## encoding: [0xf3,0x0f,0x2a,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_cvtsi2ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
 ; AVX2-NEXT:    vcvtsi2ssl %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x2a,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_cvtsi2ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
 ; SKX-NEXT:    vcvtsi2ssl %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
@@ -235,17 +235,17 @@ declare <4 x float> @llvm.x86.sse.cvtsi2
 
 define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_cvtss2si:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtss2si %xmm0, %eax ## encoding: [0xf3,0x0f,0x2d,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_cvtss2si:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2d,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_cvtss2si:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2d,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0) ; <i32> [#uses=1]
@@ -256,17 +256,17 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x
 
 define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_cvttss2si:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttss2si %xmm0, %eax ## encoding: [0xf3,0x0f,0x2c,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_cvttss2si:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2c,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_cvttss2si:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2c,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0) ; <i32> [#uses=1]
@@ -277,13 +277,13 @@ declare i32 @llvm.x86.sse.cvttss2si(<4 x
 
 define void @test_x86_sse_ldmxcsr(i8* %a0) {
 ; SSE-LABEL: test_x86_sse_ldmxcsr:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    ldmxcsr (%eax) ## encoding: [0x0f,0xae,0x10]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_ldmxcsr:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; VCHECK-NEXT:    vldmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x10]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
@@ -296,17 +296,17 @@ declare void @llvm.x86.sse.ldmxcsr(i8*)
 
 define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_max_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    maxps %xmm1, %xmm0 ## encoding: [0x0f,0x5f,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_max_ps:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_max_ps:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -317,17 +317,17 @@ declare <4 x float> @llvm.x86.sse.max.ps
 
 define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_max_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    maxss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5f,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_max_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5f,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_max_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5f,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -338,17 +338,17 @@ declare <4 x float> @llvm.x86.sse.max.ss
 
 define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_min_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    minps %xmm1, %xmm0 ## encoding: [0x0f,0x5d,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_min_ps:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vminps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5d,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_min_ps:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -359,17 +359,17 @@ declare <4 x float> @llvm.x86.sse.min.ps
 
 define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_min_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    minss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5d,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_min_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vminss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5d,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_min_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vminss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5d,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -380,12 +380,12 @@ declare <4 x float> @llvm.x86.sse.min.ss
 
 define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_movmsk_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movmskps %xmm0, %eax ## encoding: [0x0f,0x50,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_movmsk_ps:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vmovmskps %xmm0, %eax ## encoding: [0xc5,0xf8,0x50,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) ; <i32> [#uses=1]
@@ -397,12 +397,12 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x
 
 define <4 x float> @test_x86_sse_rcp_ps(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_rcp_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    rcpps %xmm0, %xmm0 ## encoding: [0x0f,0x53,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_rcp_ps:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vrcpps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x53,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -413,12 +413,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ps
 
 define <4 x float> @test_x86_sse_rcp_ss(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_rcp_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    rcpss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x53,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_rcp_ss:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x53,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -429,12 +429,12 @@ declare <4 x float> @llvm.x86.sse.rcp.ss
 
 define <4 x float> @test_x86_sse_rsqrt_ps(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_rsqrt_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    rsqrtps %xmm0, %xmm0 ## encoding: [0x0f,0x52,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_rsqrt_ps:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vrsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x52,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -445,12 +445,12 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
 
 define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_rsqrt_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    rsqrtss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x52,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_rsqrt_ss:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x52,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -461,17 +461,17 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
 
 define <4 x float> @test_x86_sse_sqrt_ps(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_sqrt_ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    sqrtps %xmm0, %xmm0 ## encoding: [0x0f,0x51,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_sqrt_ps:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x51,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_sqrt_ps:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vsqrtps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x51,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -482,17 +482,17 @@ declare <4 x float> @llvm.x86.sse.sqrt.p
 
 define <4 x float> @test_x86_sse_sqrt_ss(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse_sqrt_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x51,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_sqrt_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x51,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_sqrt_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x51,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -503,13 +503,13 @@ declare <4 x float> @llvm.x86.sse.sqrt.s
 
 define void @test_x86_sse_stmxcsr(i8* %a0) {
 ; SSE-LABEL: test_x86_sse_stmxcsr:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    stmxcsr (%eax) ## encoding: [0x0f,0xae,0x18]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse_stmxcsr:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; VCHECK-NEXT:    vstmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x18]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
@@ -521,7 +521,7 @@ declare void @llvm.x86.sse.stmxcsr(i8*)
 
 define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_ucomieq_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SSE-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -530,7 +530,7 @@ define i32 @test_x86_sse_ucomieq_ss(<4 x
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_ucomieq_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
 ; AVX2-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; AVX2-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -539,7 +539,7 @@ define i32 @test_x86_sse_ucomieq_ss(<4 x
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_ucomieq_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
 ; SKX-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SKX-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -554,21 +554,21 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4
 
 define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_ucomige_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_ucomige_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_ucomige_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -581,21 +581,21 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4
 
 define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_ucomigt_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_ucomigt_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_ucomigt_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -608,21 +608,21 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4
 
 define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_ucomile_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomiss %xmm0, %xmm1 ## encoding: [0x0f,0x2e,0xc8]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_ucomile_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_ucomile_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -635,21 +635,21 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4
 
 define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_ucomilt_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomiss %xmm0, %xmm1 ## encoding: [0x0f,0x2e,0xc8]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_ucomilt_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_ucomilt_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -662,7 +662,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4
 
 define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse_ucomineq_ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    ucomiss %xmm1, %xmm0 ## encoding: [0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SSE-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -671,7 +671,7 @@ define i32 @test_x86_sse_ucomineq_ss(<4
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_ucomineq_ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
 ; AVX2-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; AVX2-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -680,7 +680,7 @@ define i32 @test_x86_sse_ucomineq_ss(<4
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_ucomineq_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
 ; SKX-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SKX-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -695,12 +695,12 @@ declare i32 @llvm.x86.sse.ucomineq.ss(<4
 
 define void @sfence() nounwind {
 ; SSE-LABEL: sfence:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    sfence ## encoding: [0x0f,0xae,0xf8]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: sfence:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    sfence ## encoding: [0x0f,0xae,0xf8]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   tail call void @llvm.x86.sse.sfence()

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -5,21 +5,21 @@
 
 define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_sse_cvtss2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtss2si %xmm0, %rax
 ; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse_cvtss2si64:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2d,0xc0]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_cvtss2si64:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
 ; AVX2-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_cvtss2si64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
 ; SKX-NEXT:    retq ## encoding: [0xc3]
   %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
@@ -30,21 +30,21 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4
 
 define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
 ; CHECK-LABEL: test_x86_sse_cvtsi642ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse_cvtsi642ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsi2ssq %rdi, %xmm0 ## encoding: [0xf3,0x48,0x0f,0x2a,0xc7]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_cvtsi642ss:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
 ; AVX2-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_cvtsi642ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
 ; SKX-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
@@ -55,21 +55,21 @@ declare <4 x float> @llvm.x86.sse.cvtsi6
 
 define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_sse_cvttss2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttss2si %xmm0, %rax
 ; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse_cvttss2si64:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2c,0xc0]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse_cvttss2si64:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
 ; AVX2-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse_cvttss2si64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
 ; SKX-NEXT:    retq ## encoding: [0xc3]
   %res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]

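[Because the same one-token rename repeats across thousands of CHECK lines, it is worth confirming that no old-style markers survive after such a sweep. A small scan along these lines would do — again a hypothetical sketch under the same assumption, not taken from the commit:

    import re
    import sys

    STALE = re.compile(r"BB#\d+")  # old-style marker; the new form is %bb.N

    for path in sys.argv[1:]:
        with open(path) as f:
            for lineno, line in enumerate(f, 1):
                if STALE.search(line):
                    print(f"{path}:{lineno}: {line.rstrip()}")

Any line it prints still carries the pre-r319665 format and would need the rewrite applied.]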
Modified: llvm/trunk/test/CodeGen/X86/sse-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-minmax.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-minmax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-minmax.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 
 define double @ogt(double %x, double %y)  {
 ; ALL-LABEL: ogt:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    maxsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %c = fcmp ogt double %x, %y
@@ -25,7 +25,7 @@ define double @ogt(double %x, double %y)
 
 define double @olt(double %x, double %y)  {
 ; ALL-LABEL: olt:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    minsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %c = fcmp olt double %x, %y
@@ -35,18 +35,18 @@ define double @olt(double %x, double %y)
 
 define double @ogt_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ogt_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ogt_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ogt_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -57,18 +57,18 @@ define double @ogt_inverse(double %x, do
 
 define double @olt_inverse(double %x, double %y)  {
 ; STRICT-LABEL: olt_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: olt_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: olt_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -79,7 +79,7 @@ define double @olt_inverse(double %x, do
 
 define double @oge(double %x, double %y)  {
 ; STRICT-LABEL: oge:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm0
@@ -88,7 +88,7 @@ define double @oge(double %x, double %y)
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: oge:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp oge double %x, %y
@@ -98,7 +98,7 @@ define double @oge(double %x, double %y)
 
 define double @ole(double %x, double %y)  {
 ; STRICT-LABEL: ole:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm1, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm0
@@ -108,7 +108,7 @@ define double @ole(double %x, double %y)
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ole:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ole double %x, %y
@@ -118,7 +118,7 @@ define double @ole(double %x, double %y)
 
 define double @oge_inverse(double %x, double %y)  {
 ; STRICT-LABEL: oge_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm1
@@ -128,12 +128,12 @@ define double @oge_inverse(double %x, do
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: oge_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: oge_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -144,7 +144,7 @@ define double @oge_inverse(double %x, do
 
 define double @ole_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ole_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm1, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm1
@@ -154,12 +154,12 @@ define double @ole_inverse(double %x, do
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ole_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ole_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -170,7 +170,7 @@ define double @ole_inverse(double %x, do
 
 define double @ogt_x(double %x)  {
 ; ALL-LABEL: ogt_x:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    xorpd %xmm1, %xmm1
 ; ALL-NEXT:    maxsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
@@ -181,7 +181,7 @@ define double @ogt_x(double %x)  {
 
 define double @olt_x(double %x)  {
 ; ALL-LABEL: olt_x:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    xorpd %xmm1, %xmm1
 ; ALL-NEXT:    minsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
@@ -192,20 +192,20 @@ define double @olt_x(double %x)  {
 
 define double @ogt_inverse_x(double %x)  {
 ; STRICT-LABEL: ogt_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ogt_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ogt_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -217,20 +217,20 @@ define double @ogt_inverse_x(double %x)
 
 define double @olt_inverse_x(double %x)  {
 ; STRICT-LABEL: olt_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: olt_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: olt_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -242,14 +242,14 @@ define double @olt_inverse_x(double %x)
 
 define double @oge_x(double %x)  {
 ; STRICT-LABEL: oge_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm1
 ; STRICT-NEXT:    andpd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: oge_x:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    xorpd %xmm1, %xmm1
 ; RELAX-NEXT:    maxsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
@@ -260,7 +260,7 @@ define double @oge_x(double %x)  {
 
 define double @ole_x(double %x)  {
 ; STRICT-LABEL: ole_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm2, %xmm2
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
@@ -269,7 +269,7 @@ define double @ole_x(double %x)  {
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ole_x:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    xorpd %xmm1, %xmm1
 ; RELAX-NEXT:    minsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
@@ -280,7 +280,7 @@ define double @ole_x(double %x)  {
 
 define double @oge_inverse_x(double %x)  {
 ; STRICT-LABEL: oge_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm1
 ; STRICT-NEXT:    andnpd %xmm0, %xmm1
@@ -288,13 +288,13 @@ define double @oge_inverse_x(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: oge_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: oge_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -306,7 +306,7 @@ define double @oge_inverse_x(double %x)
 
 define double @ole_inverse_x(double %x)  {
 ; STRICT-LABEL: ole_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm2, %xmm2
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
@@ -315,13 +315,13 @@ define double @ole_inverse_x(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ole_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ole_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -333,7 +333,7 @@ define double @ole_inverse_x(double %x)
 
 define double @ugt(double %x, double %y)  {
 ; STRICT-LABEL: ugt:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm1, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm0
@@ -343,7 +343,7 @@ define double @ugt(double %x, double %y)
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ugt:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ugt double %x, %y
@@ -353,7 +353,7 @@ define double @ugt(double %x, double %y)
 
 define double @ult(double %x, double %y)  {
 ; STRICT-LABEL: ult:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm0
@@ -362,7 +362,7 @@ define double @ult(double %x, double %y)
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ult:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ult double %x, %y
@@ -372,7 +372,7 @@ define double @ult(double %x, double %y)
 
 define double @ugt_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ugt_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm1, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm1
@@ -382,12 +382,12 @@ define double @ugt_inverse(double %x, do
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ugt_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ugt_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -398,7 +398,7 @@ define double @ugt_inverse(double %x, do
 
 define double @ult_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ult_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm2
 ; STRICT-NEXT:    andpd %xmm2, %xmm1
@@ -408,12 +408,12 @@ define double @ult_inverse(double %x, do
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ult_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ult_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -424,13 +424,13 @@ define double @ult_inverse(double %x, do
 
 define double @uge(double %x, double %y)  {
 ; STRICT-LABEL: uge:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: uge:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp uge double %x, %y
@@ -440,13 +440,13 @@ define double @uge(double %x, double %y)
 
 define double @ule(double %x, double %y)  {
 ; STRICT-LABEL: ule:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ule:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ule double %x, %y
@@ -456,17 +456,17 @@ define double @ule(double %x, double %y)
 
 define double @uge_inverse(double %x, double %y)  {
 ; STRICT-LABEL: uge_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    minsd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: uge_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: uge_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -477,17 +477,17 @@ define double @uge_inverse(double %x, do
 
 define double @ule_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ule_inverse:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    maxsd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ule_inverse:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ule_inverse:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
 ; FINITE-NEXT:    retq
@@ -498,7 +498,7 @@ define double @ule_inverse(double %x, do
 
 define double @ugt_x(double %x)  {
 ; STRICT-LABEL: ugt_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm2, %xmm2
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
@@ -507,7 +507,7 @@ define double @ugt_x(double %x)  {
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ugt_x:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    xorpd %xmm1, %xmm1
 ; RELAX-NEXT:    maxsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
@@ -518,14 +518,14 @@ define double @ugt_x(double %x)  {
 
 define double @ult_x(double %x)  {
 ; STRICT-LABEL: ult_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
 ; STRICT-NEXT:    andpd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ult_x:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    xorpd %xmm1, %xmm1
 ; RELAX-NEXT:    minsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
@@ -536,7 +536,7 @@ define double @ult_x(double %x)  {
 
 define double @ugt_inverse_x(double %x)  {
 ; STRICT-LABEL: ugt_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm2, %xmm2
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
@@ -545,13 +545,13 @@ define double @ugt_inverse_x(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ugt_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ugt_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -563,7 +563,7 @@ define double @ugt_inverse_x(double %x)
 
 define double @ult_inverse_x(double %x)  {
 ; STRICT-LABEL: ult_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
 ; STRICT-NEXT:    andnpd %xmm0, %xmm1
@@ -571,13 +571,13 @@ define double @ult_inverse_x(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ult_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ult_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -589,14 +589,14 @@ define double @ult_inverse_x(double %x)
 
 define double @uge_x(double %x)  {
 ; STRICT-LABEL: uge_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: uge_x:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    xorpd %xmm1, %xmm1
 ; RELAX-NEXT:    maxsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
@@ -607,14 +607,14 @@ define double @uge_x(double %x)  {
 
 define double @ule_x(double %x)  {
 ; STRICT-LABEL: ule_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ule_x:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    xorpd %xmm1, %xmm1
 ; RELAX-NEXT:    minsd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
@@ -625,19 +625,19 @@ define double @ule_x(double %x)  {
 
 define double @uge_inverse_x(double %x)  {
 ; STRICT-LABEL: uge_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    minsd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: uge_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    minsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: uge_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -649,19 +649,19 @@ define double @uge_inverse_x(double %x)
 
 define double @ule_inverse_x(double %x)  {
 ; STRICT-LABEL: ule_inverse_x:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    maxsd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ule_inverse_x:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
 ; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ule_inverse_x:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    xorpd %xmm1, %xmm1
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -673,7 +673,7 @@ define double @ule_inverse_x(double %x)
 
 define double @ogt_y(double %x)  {
 ; ALL-LABEL: ogt_y:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; ALL-NEXT:    retq
   %c = fcmp ogt double %x, -0.000000e+00
@@ -683,7 +683,7 @@ define double @ogt_y(double %x)  {
 
 define double @olt_y(double %x)  {
 ; ALL-LABEL: olt_y:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; ALL-NEXT:    retq
   %c = fcmp olt double %x, -0.000000e+00
@@ -693,19 +693,19 @@ define double @olt_y(double %x)  {
 
 define double @ogt_inverse_y(double %x)  {
 ; STRICT-LABEL: ogt_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ogt_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ogt_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -717,19 +717,19 @@ define double @ogt_inverse_y(double %x)
 
 define double @olt_inverse_y(double %x)  {
 ; STRICT-LABEL: olt_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: olt_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: olt_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -741,7 +741,7 @@ define double @olt_inverse_y(double %x)
 
 define double @oge_y(double %x)  {
 ; STRICT-LABEL: oge_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm2
@@ -751,7 +751,7 @@ define double @oge_y(double %x)  {
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: oge_y:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp oge double %x, -0.000000e+00
@@ -761,7 +761,7 @@ define double @oge_y(double %x)  {
 
 define double @ole_y(double %x)  {
 ; STRICT-LABEL: ole_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
@@ -772,7 +772,7 @@ define double @ole_y(double %x)  {
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ole_y:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ole double %x, -0.000000e+00
@@ -782,7 +782,7 @@ define double @ole_y(double %x)  {
 
 define double @oge_inverse_y(double %x)  {
 ; STRICT-LABEL: oge_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm2, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm1
@@ -793,12 +793,12 @@ define double @oge_inverse_y(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: oge_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: oge_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -810,7 +810,7 @@ define double @oge_inverse_y(double %x)
 
 define double @ole_inverse_y(double %x)  {
 ; STRICT-LABEL: ole_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
@@ -821,12 +821,12 @@ define double @ole_inverse_y(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ole_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ole_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -838,7 +838,7 @@ define double @ole_inverse_y(double %x)
 
 define double @ugt_y(double %x)  {
 ; STRICT-LABEL: ugt_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
@@ -849,7 +849,7 @@ define double @ugt_y(double %x)  {
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ugt_y:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ugt double %x, -0.000000e+00
@@ -859,7 +859,7 @@ define double @ugt_y(double %x)  {
 
 define double @ult_y(double %x)  {
 ; STRICT-LABEL: ult_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm2
@@ -869,7 +869,7 @@ define double @ult_y(double %x)  {
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ult_y:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ult double %x, -0.000000e+00
@@ -879,7 +879,7 @@ define double @ult_y(double %x)  {
 
 define double @ugt_inverse_y(double %x)  {
 ; STRICT-LABEL: ugt_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
@@ -890,12 +890,12 @@ define double @ugt_inverse_y(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ugt_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ugt_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -907,7 +907,7 @@ define double @ugt_inverse_y(double %x)
 
 define double @ult_inverse_y(double %x)  {
 ; STRICT-LABEL: ult_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; STRICT-NEXT:    movapd %xmm2, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
@@ -918,12 +918,12 @@ define double @ult_inverse_y(double %x)
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ult_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ult_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -935,14 +935,14 @@ define double @ult_inverse_y(double %x)
 
 define double @uge_y(double %x)  {
 ; STRICT-LABEL: uge_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: uge_y:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp uge double %x, -0.000000e+00
@@ -952,14 +952,14 @@ define double @uge_y(double %x)  {
 
 define double @ule_y(double %x)  {
 ; STRICT-LABEL: ule_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ule_y:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; RELAX-NEXT:    retq
   %c = fcmp ule double %x, -0.000000e+00
@@ -969,17 +969,17 @@ define double @ule_y(double %x)  {
 
 define double @uge_inverse_y(double %x)  {
 ; STRICT-LABEL: uge_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: uge_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: uge_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -991,17 +991,17 @@ define double @uge_inverse_y(double %x)
 
 define double @ule_inverse_y(double %x)  {
 ; STRICT-LABEL: ule_inverse_y:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ule_inverse_y:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: ule_inverse_y:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1015,19 +1015,19 @@ define double @ule_inverse_y(double %x)
 
 define double @clampTo3k_a(double %x)  {
 ; STRICT-LABEL: clampTo3k_a:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_a:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_a:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1039,17 +1039,17 @@ define double @clampTo3k_a(double %x)  {
 
 define double @clampTo3k_b(double %x)  {
 ; STRICT-LABEL: clampTo3k_b:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_b:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_b:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1061,19 +1061,19 @@ define double @clampTo3k_b(double %x)  {
 
 define double @clampTo3k_c(double %x)  {
 ; STRICT-LABEL: clampTo3k_c:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_c:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_c:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1085,17 +1085,17 @@ define double @clampTo3k_c(double %x)  {
 
 define double @clampTo3k_d(double %x)  {
 ; STRICT-LABEL: clampTo3k_d:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_d:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_d:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1107,19 +1107,19 @@ define double @clampTo3k_d(double %x)  {
 
 define double @clampTo3k_e(double %x)  {
 ; STRICT-LABEL: clampTo3k_e:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    maxsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_e:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_e:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1131,17 +1131,17 @@ define double @clampTo3k_e(double %x)  {
 
 define double @clampTo3k_f(double %x)  {
 ; STRICT-LABEL: clampTo3k_f:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_f:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    maxsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_f:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    maxsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1153,19 +1153,19 @@ define double @clampTo3k_f(double %x)  {
 
 define double @clampTo3k_g(double %x)  {
 ; STRICT-LABEL: clampTo3k_g:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; STRICT-NEXT:    minsd %xmm0, %xmm1
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_g:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_g:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1177,17 +1177,17 @@ define double @clampTo3k_g(double %x)  {
 
 define double @clampTo3k_h(double %x)  {
 ; STRICT-LABEL: clampTo3k_h:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: clampTo3k_h:
-; UNSAFE:       # BB#0:
+; UNSAFE:       # %bb.0:
 ; UNSAFE-NEXT:    minsd {{.*}}(%rip), %xmm0
 ; UNSAFE-NEXT:    retq
 ;
 ; FINITE-LABEL: clampTo3k_h:
-; FINITE:       # BB#0:
+; FINITE:       # %bb.0:
 ; FINITE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; FINITE-NEXT:    minsd %xmm0, %xmm1
 ; FINITE-NEXT:    movapd %xmm1, %xmm0
@@ -1199,7 +1199,7 @@ define double @clampTo3k_h(double %x)  {
 
 define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y)  {
 ; STRICT-LABEL: test_maxpd:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    cmplepd %xmm2, %xmm0
@@ -1208,7 +1208,7 @@ define <2 x double> @test_maxpd(<2 x dou
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_maxpd:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxpd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %max_is_x = fcmp oge <2 x double> %x, %y
@@ -1218,7 +1218,7 @@ define <2 x double> @test_maxpd(<2 x dou
 
 define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y)  {
 ; STRICT-LABEL: test_minpd:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmplepd %xmm1, %xmm0
 ; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
@@ -1226,7 +1226,7 @@ define <2 x double> @test_minpd(<2 x dou
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_minpd:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minpd %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %min_is_x = fcmp ole <2 x double> %x, %y
@@ -1236,7 +1236,7 @@ define <2 x double> @test_minpd(<2 x dou
 
 define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y)  {
 ; STRICT-LABEL: test_maxps:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movaps %xmm0, %xmm2
 ; STRICT-NEXT:    movaps %xmm1, %xmm0
 ; STRICT-NEXT:    cmpleps %xmm2, %xmm0
@@ -1245,7 +1245,7 @@ define <4 x float> @test_maxps(<4 x floa
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_maxps:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxps %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %max_is_x = fcmp oge <4 x float> %x, %y
@@ -1255,7 +1255,7 @@ define <4 x float> @test_maxps(<4 x floa
 
 define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y)  {
 ; STRICT-LABEL: test_minps:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movaps %xmm0, %xmm2
 ; STRICT-NEXT:    cmpleps %xmm1, %xmm0
 ; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
@@ -1263,7 +1263,7 @@ define <4 x float> @test_minps(<4 x floa
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_minps:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minps %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %min_is_x = fcmp ole <4 x float> %x, %y
@@ -1273,7 +1273,7 @@ define <4 x float> @test_minps(<4 x floa
 
 define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
 ; STRICT-LABEL: test_maxps_illegal_v2f32:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movaps %xmm0, %xmm2
 ; STRICT-NEXT:    movaps %xmm1, %xmm0
 ; STRICT-NEXT:    cmpleps %xmm2, %xmm0
@@ -1282,7 +1282,7 @@ define <2 x float> @test_maxps_illegal_v
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_maxps_illegal_v2f32:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxps %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %max_is_x = fcmp oge <2 x float> %x, %y
@@ -1292,7 +1292,7 @@ define <2 x float> @test_maxps_illegal_v
 
 define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
 ; STRICT-LABEL: test_minps_illegal_v2f32:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movaps %xmm0, %xmm2
 ; STRICT-NEXT:    cmpleps %xmm1, %xmm0
 ; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
@@ -1300,7 +1300,7 @@ define <2 x float> @test_minps_illegal_v
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_minps_illegal_v2f32:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minps %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %min_is_x = fcmp ole <2 x float> %x, %y
@@ -1310,7 +1310,7 @@ define <2 x float> @test_minps_illegal_v
 
 define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
 ; STRICT-LABEL: test_maxps_illegal_v3f32:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movaps %xmm0, %xmm2
 ; STRICT-NEXT:    movaps %xmm1, %xmm0
 ; STRICT-NEXT:    cmpleps %xmm2, %xmm0
@@ -1319,7 +1319,7 @@ define <3 x float> @test_maxps_illegal_v
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_maxps_illegal_v3f32:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    maxps %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %max_is_x = fcmp oge <3 x float> %x, %y
@@ -1329,7 +1329,7 @@ define <3 x float> @test_maxps_illegal_v
 
 define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
 ; STRICT-LABEL: test_minps_illegal_v3f32:
-; STRICT:       # BB#0:
+; STRICT:       # %bb.0:
 ; STRICT-NEXT:    movaps %xmm0, %xmm2
 ; STRICT-NEXT:    cmpleps %xmm1, %xmm0
 ; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
@@ -1337,7 +1337,7 @@ define <3 x float> @test_minps_illegal_v
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: test_minps_illegal_v3f32:
-; RELAX:       # BB#0:
+; RELAX:       # %bb.0:
 ; RELAX-NEXT:    minps %xmm1, %xmm0
 ; RELAX-NEXT:    retq
   %min_is_x = fcmp ole <3 x float> %x, %y

Modified: llvm/trunk/test/CodeGen/X86/sse-only.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-only.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-only.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-only.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movapd (%ecx), %xmm0

Modified: llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll Mon Dec  4 09:18:51 2017
@@ -9,12 +9,12 @@
 
 define <4 x float> @recip(<4 x float> %x) {
 ; SSE-LABEL: recip:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rcpss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: recip:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %y = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %x)
@@ -24,12 +24,12 @@ define <4 x float> @recip(<4 x float> %x
 
 define <4 x float> @recip_square_root(<4 x float> %x) {
 ; SSE-LABEL: recip_square_root:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    rsqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: recip_square_root:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %y = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %x)
@@ -39,12 +39,12 @@ define <4 x float> @recip_square_root(<4
 
 define <4 x float> @square_root(<4 x float> %x) {
 ; SSE-LABEL: square_root:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: square_root:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %y = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x)
@@ -54,12 +54,12 @@ define <4 x float> @square_root(<4 x flo
 
 define <2 x double> @square_root_double(<2 x double> %x) {
 ; SSE-LABEL: square_root_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: square_root_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %y = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %x)

Modified: llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll Mon Dec  4 09:18:51 2017
@@ -10,12 +10,12 @@
 
 define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %b, i32 0
@@ -27,12 +27,12 @@ define <4 x float> @test_add_ss(<4 x flo
 
 define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %b, i32 0
@@ -44,12 +44,12 @@ define <4 x float> @test_sub_ss(<4 x flo
 
 define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %b, i32 0
@@ -61,12 +61,12 @@ define <4 x float> @test_mul_ss(<4 x flo
 
 define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %b, i32 0
@@ -78,25 +78,25 @@ define <4 x float> @test_div_ss(<4 x flo
 
 define <4 x float> @test_sqrt_ss(<4 x float> %a) {
 ; SSE2-LABEL: test_sqrt_ss:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    sqrtss %xmm0, %xmm1
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_sqrt_ss:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    sqrtss %xmm0, %xmm1
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_sqrt_ss:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vsqrtss %xmm0, %xmm0, %xmm1
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX512-LABEL: test_sqrt_ss:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vsqrtss %xmm0, %xmm0, %xmm1
 ; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX512-NEXT:    retq
@@ -109,12 +109,12 @@ declare float @llvm.sqrt.f32(float)
 
 define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %b, i32 0
@@ -126,12 +126,12 @@ define <2 x double> @test_add_sd(<2 x do
 
 define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %b, i32 0
@@ -143,12 +143,12 @@ define <2 x double> @test_sub_sd(<2 x do
 
 define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %b, i32 0
@@ -160,12 +160,12 @@ define <2 x double> @test_mul_sd(<2 x do
 
 define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %b, i32 0
@@ -177,25 +177,25 @@ define <2 x double> @test_div_sd(<2 x do
 
 define <2 x double> @test_sqrt_sd(<2 x double> %a) {
 ; SSE2-LABEL: test_sqrt_sd:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    sqrtsd %xmm0, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_sqrt_sd:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    sqrtsd %xmm0, %xmm1
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: test_sqrt_sd:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm1
 ; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX512-LABEL: test_sqrt_sd:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm1
 ; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX512-NEXT:    retq
@@ -208,13 +208,13 @@ declare double @llvm.sqrt.f64(double)
 
 define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test2_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %a, i32 0
@@ -226,13 +226,13 @@ define <4 x float> @test2_add_ss(<4 x fl
 
 define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test2_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %a, i32 0
@@ -244,13 +244,13 @@ define <4 x float> @test2_sub_ss(<4 x fl
 
 define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test2_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %a, i32 0
@@ -262,13 +262,13 @@ define <4 x float> @test2_mul_ss(<4 x fl
 
 define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test2_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <4 x float> %a, i32 0
@@ -280,13 +280,13 @@ define <4 x float> @test2_div_ss(<4 x fl
 
 define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test2_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %a, i32 0
@@ -298,13 +298,13 @@ define <2 x double> @test2_add_sd(<2 x d
 
 define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test2_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %a, i32 0
@@ -316,13 +316,13 @@ define <2 x double> @test2_sub_sd(<2 x d
 
 define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test2_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %a, i32 0
@@ -334,13 +334,13 @@ define <2 x double> @test2_mul_sd(<2 x d
 
 define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test2_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %a, i32 0
@@ -352,13 +352,13 @@ define <2 x double> @test2_div_sd(<2 x d
 
 define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_multiple_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm0, %xmm1
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_multiple_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -372,14 +372,14 @@ define <4 x float> @test_multiple_add_ss
 
 define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_multiple_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    subss %xmm1, %xmm2
 ; SSE-NEXT:    subss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_multiple_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -393,13 +393,13 @@ define <4 x float> @test_multiple_sub_ss
 
 define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_multiple_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm0, %xmm1
 ; SSE-NEXT:    mulss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_multiple_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -413,14 +413,14 @@ define <4 x float> @test_multiple_mul_ss
 
 define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test_multiple_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    divss %xmm1, %xmm2
 ; SSE-NEXT:    divss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_multiple_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -437,12 +437,12 @@ define <4 x float> @test_multiple_div_ss
 
 define <4 x float> @blend_add_ss(<4 x float> %a, float %b) {
 ; SSE-LABEL: blend_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -455,12 +455,12 @@ define <4 x float> @blend_add_ss(<4 x fl
 
 define <4 x float> @blend_sub_ss(<4 x float> %a, float %b) {
 ; SSE-LABEL: blend_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -473,12 +473,12 @@ define <4 x float> @blend_sub_ss(<4 x fl
 
 define <4 x float> @blend_mul_ss(<4 x float> %a, float %b) {
 ; SSE-LABEL: blend_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -491,12 +491,12 @@ define <4 x float> @blend_mul_ss(<4 x fl
 
 define <4 x float> @blend_div_ss(<4 x float> %a, float %b) {
 ; SSE-LABEL: blend_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -509,12 +509,12 @@ define <4 x float> @blend_div_ss(<4 x fl
 
 define <2 x double> @blend_add_sd(<2 x double> %a, double %b) {
 ; SSE-LABEL: blend_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -527,12 +527,12 @@ define <2 x double> @blend_add_sd(<2 x d
 
 define <2 x double> @blend_sub_sd(<2 x double> %a, double %b) {
 ; SSE-LABEL: blend_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -545,12 +545,12 @@ define <2 x double> @blend_sub_sd(<2 x d
 
 define <2 x double> @blend_mul_sd(<2 x double> %a, double %b) {
 ; SSE-LABEL: blend_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -563,12 +563,12 @@ define <2 x double> @blend_mul_sd(<2 x d
 
 define <2 x double> @blend_div_sd(<2 x double> %a, double %b) {
 ; SSE-LABEL: blend_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: blend_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
@@ -584,12 +584,12 @@ define <2 x double> @blend_div_sd(<2 x d
 
 define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <4 x float> %a, %b
@@ -599,12 +599,12 @@ define <4 x float> @insert_test_add_ss(<
 
 define <4 x float> @insert_test_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <4 x float> %a, %b
@@ -614,12 +614,12 @@ define <4 x float> @insert_test_sub_ss(<
 
 define <4 x float> @insert_test_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <4 x float> %a, %b
@@ -629,12 +629,12 @@ define <4 x float> @insert_test_mul_ss(<
 
 define <4 x float> @insert_test_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <4 x float> %a, %b
@@ -644,12 +644,12 @@ define <4 x float> @insert_test_div_ss(<
 
 define <2 x double> @insert_test_add_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <2 x double> %a, %b
@@ -659,12 +659,12 @@ define <2 x double> @insert_test_add_sd(
 
 define <2 x double> @insert_test_sub_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <2 x double> %a, %b
@@ -674,12 +674,12 @@ define <2 x double> @insert_test_sub_sd(
 
 define <2 x double> @insert_test_mul_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <2 x double> %a, %b
@@ -689,12 +689,12 @@ define <2 x double> @insert_test_mul_sd(
 
 define <2 x double> @insert_test_div_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <2 x double> %a, %b
@@ -704,13 +704,13 @@ define <2 x double> @insert_test_div_sd(
 
 define <4 x float> @insert_test2_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test2_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <4 x float> %b, %a
@@ -720,13 +720,13 @@ define <4 x float> @insert_test2_add_ss(
 
 define <4 x float> @insert_test2_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test2_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <4 x float> %b, %a
@@ -736,13 +736,13 @@ define <4 x float> @insert_test2_sub_ss(
 
 define <4 x float> @insert_test2_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test2_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <4 x float> %b, %a
@@ -752,13 +752,13 @@ define <4 x float> @insert_test2_mul_ss(
 
 define <4 x float> @insert_test2_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test2_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <4 x float> %b, %a
@@ -768,13 +768,13 @@ define <4 x float> @insert_test2_div_ss(
 
 define <2 x double> @insert_test2_add_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test2_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <2 x double> %b, %a
@@ -784,13 +784,13 @@ define <2 x double> @insert_test2_add_sd
 
 define <2 x double> @insert_test2_sub_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test2_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <2 x double> %b, %a
@@ -800,13 +800,13 @@ define <2 x double> @insert_test2_sub_sd
 
 define <2 x double> @insert_test2_mul_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test2_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <2 x double> %b, %a
@@ -816,13 +816,13 @@ define <2 x double> @insert_test2_mul_sd
 
 define <2 x double> @insert_test2_div_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test2_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test2_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <2 x double> %b, %a
@@ -832,12 +832,12 @@ define <2 x double> @insert_test2_div_sd
 
 define <4 x float> @insert_test3_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test3_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <4 x float> %a, %b
@@ -847,12 +847,12 @@ define <4 x float> @insert_test3_add_ss(
 
 define <4 x float> @insert_test3_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test3_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <4 x float> %a, %b
@@ -862,12 +862,12 @@ define <4 x float> @insert_test3_sub_ss(
 
 define <4 x float> @insert_test3_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test3_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <4 x float> %a, %b
@@ -877,12 +877,12 @@ define <4 x float> @insert_test3_mul_ss(
 
 define <4 x float> @insert_test3_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test3_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <4 x float> %a, %b
@@ -892,12 +892,12 @@ define <4 x float> @insert_test3_div_ss(
 
 define <2 x double> @insert_test3_add_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test3_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <2 x double> %a, %b
@@ -907,12 +907,12 @@ define <2 x double> @insert_test3_add_sd
 
 define <2 x double> @insert_test3_sub_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test3_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <2 x double> %a, %b
@@ -922,12 +922,12 @@ define <2 x double> @insert_test3_sub_sd
 
 define <2 x double> @insert_test3_mul_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test3_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <2 x double> %a, %b
@@ -937,12 +937,12 @@ define <2 x double> @insert_test3_mul_sd
 
 define <2 x double> @insert_test3_div_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test3_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test3_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <2 x double> %a, %b
@@ -952,13 +952,13 @@ define <2 x double> @insert_test3_div_sd
 
 define <4 x float> @insert_test4_add_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test4_add_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_add_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <4 x float> %b, %a
@@ -968,13 +968,13 @@ define <4 x float> @insert_test4_add_ss(
 
 define <4 x float> @insert_test4_sub_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test4_sub_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_sub_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <4 x float> %b, %a
@@ -984,13 +984,13 @@ define <4 x float> @insert_test4_sub_ss(
 
 define <4 x float> @insert_test4_mul_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test4_mul_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_mul_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <4 x float> %b, %a
@@ -1000,13 +1000,13 @@ define <4 x float> @insert_test4_mul_ss(
 
 define <4 x float> @insert_test4_div_ss(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: insert_test4_div_ss:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_div_ss:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <4 x float> %b, %a
@@ -1016,13 +1016,13 @@ define <4 x float> @insert_test4_div_ss(
 
 define <2 x double> @insert_test4_add_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test4_add_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_add_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fadd <2 x double> %b, %a
@@ -1032,13 +1032,13 @@ define <2 x double> @insert_test4_add_sd
 
 define <2 x double> @insert_test4_sub_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test4_sub_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_sub_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <2 x double> %b, %a
@@ -1048,13 +1048,13 @@ define <2 x double> @insert_test4_sub_sd
 
 define <2 x double> @insert_test4_mul_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test4_mul_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_mul_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fmul <2 x double> %b, %a
@@ -1064,13 +1064,13 @@ define <2 x double> @insert_test4_mul_sd
 
 define <2 x double> @insert_test4_div_sd(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: insert_test4_div_sd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm0, %xmm1
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insert_test4_div_sd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %1 = fdiv <2 x double> %b, %a
@@ -1080,10 +1080,10 @@ define <2 x double> @insert_test4_div_sd
 
 define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
 ; SSE2-LABEL: add_ss_mask:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    testb $1, %dil
 ; SSE2-NEXT:    jne .LBB62_1
-; SSE2-NEXT:  # BB#2:
+; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    movaps %xmm2, %xmm1
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE2-NEXT:    retq
@@ -1093,10 +1093,10 @@ define <4 x float> @add_ss_mask(<4 x flo
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: add_ss_mask:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    testb $1, %dil
 ; SSE41-NEXT:    jne .LBB62_1
-; SSE41-NEXT:  # BB#2:
+; SSE41-NEXT:  # %bb.2:
 ; SSE41-NEXT:    movaps %xmm2, %xmm1
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT:    retq
@@ -1106,17 +1106,17 @@ define <4 x float> @add_ss_mask(<4 x flo
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: add_ss_mask:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    testb $1, %dil
 ; AVX1-NEXT:    je .LBB62_2
-; AVX1-NEXT:  # BB#1:
+; AVX1-NEXT:  # %bb.1:
 ; AVX1-NEXT:    vaddss %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:  .LBB62_2:
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX512-LABEL: add_ss_mask:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm1
 ; AVX512-NEXT:    kmovw %edi, %k1
 ; AVX512-NEXT:    vmovss %xmm1, %xmm0, %xmm2 {%k1}
@@ -1135,10 +1135,10 @@ define <4 x float> @add_ss_mask(<4 x flo
 
 define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
 ; SSE2-LABEL: add_sd_mask:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    testb $1, %dil
 ; SSE2-NEXT:    jne .LBB63_1
-; SSE2-NEXT:  # BB#2:
+; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    movapd %xmm2, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
@@ -1148,10 +1148,10 @@ define <2 x double> @add_sd_mask(<2 x do
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: add_sd_mask:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    testb $1, %dil
 ; SSE41-NEXT:    jne .LBB63_1
-; SSE41-NEXT:  # BB#2:
+; SSE41-NEXT:  # %bb.2:
 ; SSE41-NEXT:    movapd %xmm2, %xmm1
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
@@ -1161,17 +1161,17 @@ define <2 x double> @add_sd_mask(<2 x do
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: add_sd_mask:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    testb $1, %dil
 ; AVX1-NEXT:    je .LBB63_2
-; AVX1-NEXT:  # BB#1:
+; AVX1-NEXT:  # %bb.1:
 ; AVX1-NEXT:    vaddsd %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:  .LBB63_2:
 ; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX512-LABEL: add_sd_mask:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm1
 ; AVX512-NEXT:    kmovw %edi, %k1
 ; AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm2 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/sse-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-schedule.ll Mon Dec  4 09:18:51 2017
@@ -13,61 +13,61 @@
 
 define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_addps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    addps (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_addps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    addps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    addps (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_addps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    addps (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_addps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_addps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_addps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_addps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_addps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_addps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -79,61 +79,61 @@ define <4 x float> @test_addps(<4 x floa
 
 define float @test_addss(float %a0, float %a1, float *%a2) {
 ; GENERIC-LABEL: test_addss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    addss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    addss (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_addss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    addss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    addss (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_addss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    addss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    addss (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_addss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_addss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_addss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_addss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_addss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_addss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -145,13 +145,13 @@ define float @test_addss(float %a0, floa
 
 define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_andps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    andps %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    andps (%rdi), %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_andps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    andps %xmm1, %xmm0 # sched: [1:0.50]
 ; ATOM-NEXT:    andps (%rdi), %xmm0 # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -161,49 +161,49 @@ define <4 x float> @test_andps(<4 x floa
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_andps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    andps %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    andps (%rdi), %xmm0 # sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_andps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_andps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_andps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -219,13 +219,13 @@ define <4 x float> @test_andps(<4 x floa
 
 define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_andnotps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    andnps %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    andnps (%rdi), %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_andnotps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    andnps %xmm1, %xmm0 # sched: [1:0.50]
 ; ATOM-NEXT:    andnps (%rdi), %xmm0 # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -235,49 +235,49 @@ define <4 x float> @test_andnotps(<4 x f
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_andnotps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    andnps %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    andnps (%rdi), %xmm0 # sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_andnotps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andnotps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_andnotps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andnotps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_andnotps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andnotps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andnotps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -295,56 +295,56 @@ define <4 x float> @test_andnotps(<4 x f
 
 define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_cmpps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cmpeqps %xmm0, %xmm1 # sched: [3:1.00]
 ; GENERIC-NEXT:    cmpeqps (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    orps %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cmpps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cmpeqps %xmm0, %xmm1 # sched: [5:5.00]
 ; ATOM-NEXT:    cmpeqps (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    orps %xmm1, %xmm0 # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cmpps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cmpeqps %xmm0, %xmm1 # sched: [3:1.00]
 ; SLM-NEXT:    cmpeqps (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    orps %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cmpps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; SANDY-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cmpps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cmpps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cmpps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cmpps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcmpeqps %xmm1, %xmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vcmpeqps (%rdi), %xmm0, %k1 # sched: [9:1.00]
 ; SKX-NEXT:    korw %k1, %k0, %k0 # sched: [1:1.00]
@@ -352,14 +352,14 @@ define <4 x float> @test_cmpps(<4 x floa
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cmpps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; BTVER2-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cmpps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
@@ -375,61 +375,61 @@ define <4 x float> @test_cmpps(<4 x floa
 
 define float @test_cmpss(float %a0, float %a1, float *%a2) {
 ; GENERIC-LABEL: test_cmpss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cmpeqss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    cmpeqss (%rdi), %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cmpss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cmpeqss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    cmpeqss (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cmpss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cmpeqss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    cmpeqss (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cmpss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cmpss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cmpss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cmpss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cmpss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cmpss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cmpss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -446,7 +446,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
 
 define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_comiss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    comiss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    setnp %al # sched: [1:0.50]
 ; GENERIC-NEXT:    sete %cl # sched: [1:0.50]
@@ -460,7 +460,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_comiss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    comiss %xmm1, %xmm0 # sched: [9:4.50]
 ; ATOM-NEXT:    setnp %al # sched: [1:0.50]
 ; ATOM-NEXT:    sete %cl # sched: [1:0.50]
@@ -474,7 +474,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_comiss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    comiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    setnp %al # sched: [1:0.50]
 ; SLM-NEXT:    sete %cl # sched: [1:0.50]
@@ -488,7 +488,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_comiss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    setnp %al # sched: [1:0.50]
 ; SANDY-NEXT:    sete %cl # sched: [1:0.50]
@@ -502,7 +502,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_comiss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    setnp %al # sched: [1:0.50]
 ; HASWELL-NEXT:    sete %cl # sched: [1:0.50]
@@ -516,7 +516,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_comiss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    setnp %al # sched: [1:0.50]
 ; BROADWELL-NEXT:    sete %cl # sched: [1:0.50]
@@ -530,7 +530,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_comiss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    setnp %al # sched: [1:0.50]
 ; SKYLAKE-NEXT:    sete %cl # sched: [1:0.50]
@@ -544,7 +544,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_comiss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SKX-NEXT:    setnp %al # sched: [1:0.50]
 ; SKX-NEXT:    sete %cl # sched: [1:0.50]
@@ -558,7 +558,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_comiss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    setnp %al # sched: [1:0.50]
 ; BTVER2-NEXT:    sete %cl # sched: [1:0.50]
@@ -572,7 +572,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_comiss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    setnp %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    sete %cl # sched: [1:0.25]
@@ -594,70 +594,70 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x
 
 define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
 ; GENERIC-LABEL: test_cvtsi2ss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cvtsi2ssl %edi, %xmm1 # sched: [5:2.00]
 ; GENERIC-NEXT:    cvtsi2ssl (%rsi), %xmm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    addss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cvtsi2ss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cvtsi2ssl (%rsi), %xmm0 # sched: [7:3.50]
 ; ATOM-NEXT:    cvtsi2ssl %edi, %xmm1 # sched: [6:3.00]
 ; ATOM-NEXT:    addss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cvtsi2ss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cvtsi2ssl (%rsi), %xmm0 # sched: [7:1.00]
 ; SLM-NEXT:    cvtsi2ssl %edi, %xmm1 # sched: [4:0.50]
 ; SLM-NEXT:    addss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cvtsi2ss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:2.00]
 ; SANDY-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtsi2ss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
 ; HASWELL-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtsi2ss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [4:1.00]
 ; BROADWELL-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtsi2ss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKYLAKE-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
 ; SKYLAKE-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtsi2ss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKX-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
 ; SKX-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtsi2ss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
 ; BTVER2-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtsi2ss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -671,70 +671,70 @@ define float @test_cvtsi2ss(i32 %a0, i32
 
 define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
 ; GENERIC-LABEL: test_cvtsi2ssq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cvtsi2ssq %rdi, %xmm1 # sched: [5:2.00]
 ; GENERIC-NEXT:    cvtsi2ssq (%rsi), %xmm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    addss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cvtsi2ssq:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cvtsi2ssq (%rsi), %xmm0 # sched: [7:3.50]
 ; ATOM-NEXT:    cvtsi2ssq %rdi, %xmm1 # sched: [6:3.00]
 ; ATOM-NEXT:    addss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cvtsi2ssq:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cvtsi2ssq (%rsi), %xmm0 # sched: [7:1.00]
 ; SLM-NEXT:    cvtsi2ssq %rdi, %xmm1 # sched: [4:0.50]
 ; SLM-NEXT:    addss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cvtsi2ssq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
 ; SANDY-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtsi2ssq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
 ; HASWELL-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtsi2ssq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:2.00]
 ; BROADWELL-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtsi2ssq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [6:2.00]
 ; SKYLAKE-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
 ; SKYLAKE-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtsi2ssq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [6:2.00]
 ; SKX-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00]
 ; SKX-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtsi2ssq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
 ; BTVER2-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtsi2ssq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -748,70 +748,70 @@ define float @test_cvtsi2ssq(i64 %a0, i6
 
 define i32 @test_cvtss2si(float %a0, float *%a1) {
 ; GENERIC-LABEL: test_cvtss2si:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cvtss2si %xmm0, %ecx # sched: [5:1.00]
 ; GENERIC-NEXT:    cvtss2si (%rdi), %eax # sched: [9:1.00]
 ; GENERIC-NEXT:    addl %ecx, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cvtss2si:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cvtss2si (%rdi), %eax # sched: [9:4.50]
 ; ATOM-NEXT:    cvtss2si %xmm0, %ecx # sched: [8:4.00]
 ; ATOM-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cvtss2si:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cvtss2si (%rdi), %eax # sched: [7:1.00]
 ; SLM-NEXT:    cvtss2si %xmm0, %ecx # sched: [4:0.50]
 ; SLM-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cvtss2si:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtss2si %xmm0, %ecx # sched: [5:1.00]
 ; SANDY-NEXT:    vcvtss2si (%rdi), %eax # sched: [10:1.00]
 ; SANDY-NEXT:    addl %ecx, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtss2si:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtss2si %xmm0, %ecx # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvtss2si (%rdi), %eax # sched: [4:1.00]
 ; HASWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtss2si:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtss2si %xmm0, %ecx # sched: [4:1.00]
 ; BROADWELL-NEXT:    vcvtss2si (%rdi), %eax # sched: [9:1.00]
 ; BROADWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtss2si:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtss2si %xmm0, %ecx # sched: [6:1.00]
 ; SKYLAKE-NEXT:    vcvtss2si (%rdi), %eax # sched: [11:1.00]
 ; SKYLAKE-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtss2si:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtss2si %xmm0, %ecx # sched: [6:1.00]
 ; SKX-NEXT:    vcvtss2si (%rdi), %eax # sched: [11:1.00]
 ; SKX-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtss2si:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtss2si (%rdi), %eax # sched: [8:1.00]
 ; BTVER2-NEXT:    vcvtss2si %xmm0, %ecx # sched: [3:1.00]
 ; BTVER2-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtss2si:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtss2si (%rdi), %eax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtss2si %xmm0, %ecx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
@@ -828,70 +828,70 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x
 
 define i64 @test_cvtss2siq(float %a0, float *%a1) {
 ; GENERIC-LABEL: test_cvtss2siq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cvtss2si %xmm0, %rcx # sched: [5:1.00]
 ; GENERIC-NEXT:    cvtss2si (%rdi), %rax # sched: [9:1.00]
 ; GENERIC-NEXT:    addq %rcx, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cvtss2siq:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cvtss2si (%rdi), %rax # sched: [10:5.00]
 ; ATOM-NEXT:    cvtss2si %xmm0, %rcx # sched: [9:4.50]
 ; ATOM-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cvtss2siq:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cvtss2si (%rdi), %rax # sched: [7:1.00]
 ; SLM-NEXT:    cvtss2si %xmm0, %rcx # sched: [4:0.50]
 ; SLM-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cvtss2siq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtss2si %xmm0, %rcx # sched: [5:1.00]
 ; SANDY-NEXT:    vcvtss2si (%rdi), %rax # sched: [10:1.00]
 ; SANDY-NEXT:    addq %rcx, %rax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtss2siq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtss2si %xmm0, %rcx # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvtss2si (%rdi), %rax # sched: [4:1.00]
 ; HASWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtss2siq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtss2si %xmm0, %rcx # sched: [4:1.00]
 ; BROADWELL-NEXT:    vcvtss2si (%rdi), %rax # sched: [9:1.00]
 ; BROADWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtss2siq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtss2si %xmm0, %rcx # sched: [6:1.00]
 ; SKYLAKE-NEXT:    vcvtss2si (%rdi), %rax # sched: [11:1.00]
 ; SKYLAKE-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtss2siq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtss2si %xmm0, %rcx # sched: [6:1.00]
 ; SKX-NEXT:    vcvtss2si (%rdi), %rax # sched: [11:1.00]
 ; SKX-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtss2siq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtss2si (%rdi), %rax # sched: [8:1.00]
 ; BTVER2-NEXT:    vcvtss2si %xmm0, %rcx # sched: [3:1.00]
 ; BTVER2-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtss2siq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtss2si (%rdi), %rax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtss2si %xmm0, %rcx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
@@ -908,70 +908,70 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4
 
 define i32 @test_cvttss2si(float %a0, float *%a1) {
 ; GENERIC-LABEL: test_cvttss2si:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cvttss2si %xmm0, %ecx # sched: [5:1.00]
 ; GENERIC-NEXT:    cvttss2si (%rdi), %eax # sched: [9:1.00]
 ; GENERIC-NEXT:    addl %ecx, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cvttss2si:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cvttss2si (%rdi), %eax # sched: [9:4.50]
 ; ATOM-NEXT:    cvttss2si %xmm0, %ecx # sched: [8:4.00]
 ; ATOM-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cvttss2si:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cvttss2si (%rdi), %eax # sched: [7:1.00]
 ; SLM-NEXT:    cvttss2si %xmm0, %ecx # sched: [4:0.50]
 ; SLM-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cvttss2si:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvttss2si %xmm0, %ecx # sched: [5:1.00]
 ; SANDY-NEXT:    vcvttss2si (%rdi), %eax # sched: [10:1.00]
 ; SANDY-NEXT:    addl %ecx, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvttss2si:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvttss2si %xmm0, %ecx # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvttss2si (%rdi), %eax # sched: [4:1.00]
 ; HASWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvttss2si:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvttss2si %xmm0, %ecx # sched: [4:1.00]
 ; BROADWELL-NEXT:    vcvttss2si (%rdi), %eax # sched: [9:1.00]
 ; BROADWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvttss2si:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvttss2si %xmm0, %ecx # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvttss2si (%rdi), %eax # sched: [11:1.00]
 ; SKYLAKE-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvttss2si:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttss2si %xmm0, %ecx # sched: [7:1.00]
 ; SKX-NEXT:    vcvttss2si (%rdi), %eax # sched: [11:1.00]
 ; SKX-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvttss2si:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvttss2si (%rdi), %eax # sched: [8:1.00]
 ; BTVER2-NEXT:    vcvttss2si %xmm0, %ecx # sched: [3:1.00]
 ; BTVER2-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvttss2si:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvttss2si (%rdi), %eax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttss2si %xmm0, %ecx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
@@ -985,70 +985,70 @@ define i32 @test_cvttss2si(float %a0, fl
 
 define i64 @test_cvttss2siq(float %a0, float *%a1) {
 ; GENERIC-LABEL: test_cvttss2siq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cvttss2si %xmm0, %rcx # sched: [5:1.00]
 ; GENERIC-NEXT:    cvttss2si (%rdi), %rax # sched: [9:1.00]
 ; GENERIC-NEXT:    addq %rcx, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_cvttss2siq:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    cvttss2si (%rdi), %rax # sched: [10:5.00]
 ; ATOM-NEXT:    cvttss2si %xmm0, %rcx # sched: [9:4.50]
 ; ATOM-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_cvttss2siq:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    cvttss2si (%rdi), %rax # sched: [7:1.00]
 ; SLM-NEXT:    cvttss2si %xmm0, %rcx # sched: [4:0.50]
 ; SLM-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_cvttss2siq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvttss2si %xmm0, %rcx # sched: [5:1.00]
 ; SANDY-NEXT:    vcvttss2si (%rdi), %rax # sched: [10:1.00]
 ; SANDY-NEXT:    addq %rcx, %rax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvttss2siq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvttss2si %xmm0, %rcx # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvttss2si (%rdi), %rax # sched: [4:1.00]
 ; HASWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvttss2siq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvttss2si %xmm0, %rcx # sched: [4:1.00]
 ; BROADWELL-NEXT:    vcvttss2si (%rdi), %rax # sched: [9:1.00]
 ; BROADWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvttss2siq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvttss2si %xmm0, %rcx # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvttss2si (%rdi), %rax # sched: [11:1.00]
 ; SKYLAKE-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvttss2siq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttss2si %xmm0, %rcx # sched: [7:1.00]
 ; SKX-NEXT:    vcvttss2si (%rdi), %rax # sched: [11:1.00]
 ; SKX-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvttss2siq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvttss2si (%rdi), %rax # sched: [8:1.00]
 ; BTVER2-NEXT:    vcvttss2si %xmm0, %rcx # sched: [3:1.00]
 ; BTVER2-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvttss2siq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvttss2si (%rdi), %rax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttss2si %xmm0, %rcx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
@@ -1062,61 +1062,61 @@ define i64 @test_cvttss2siq(float %a0, f
 
 define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_divps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    divps %xmm1, %xmm0 # sched: [14:1.00]
 ; GENERIC-NEXT:    divps (%rdi), %xmm0 # sched: [20:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_divps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    divps %xmm1, %xmm0 # sched: [70:35.00]
 ; ATOM-NEXT:    divps (%rdi), %xmm0 # sched: [125:62.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_divps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    divps %xmm1, %xmm0 # sched: [34:34.00]
 ; SLM-NEXT:    divps (%rdi), %xmm0 # sched: [37:34.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_divps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
 ; SANDY-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_divps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [13:1.00]
 ; HASWELL-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_divps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
 ; BROADWELL-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_divps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
 ; SKYLAKE-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [17:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_divps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
 ; SKX-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [17:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_divps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
 ; BTVER2-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_divps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
 ; ZNVER1-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1128,61 +1128,61 @@ define <4 x float> @test_divps(<4 x floa
 
 define float @test_divss(float %a0, float %a1, float *%a2) {
 ; GENERIC-LABEL: test_divss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    divss %xmm1, %xmm0 # sched: [14:1.00]
 ; GENERIC-NEXT:    divss (%rdi), %xmm0 # sched: [20:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_divss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    divss %xmm1, %xmm0 # sched: [34:17.00]
 ; ATOM-NEXT:    divss (%rdi), %xmm0 # sched: [62:31.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_divss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    divss %xmm1, %xmm0 # sched: [34:34.00]
 ; SLM-NEXT:    divss (%rdi), %xmm0 # sched: [37:34.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_divss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [14:1.00]
 ; SANDY-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [20:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_divss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [13:1.00]
 ; HASWELL-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_divss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
 ; BROADWELL-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_divss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
 ; SKYLAKE-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_divss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [11:1.00]
 ; SKX-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [16:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_divss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
 ; BTVER2-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_divss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
 ; ZNVER1-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1194,61 +1194,61 @@ define float @test_divss(float %a0, floa
 
 define void @test_ldmxcsr(i32 %a0) {
 ; GENERIC-LABEL: test_ldmxcsr:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; GENERIC-NEXT:    ldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_ldmxcsr:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; ATOM-NEXT:    ldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:2.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_ldmxcsr:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; SLM-NEXT:    ldmxcsr -{{[0-9]+}}(%rsp) # sched: [3:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_ldmxcsr:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; SANDY-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_ldmxcsr:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; HASWELL-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_ldmxcsr:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; BROADWELL-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_ldmxcsr:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_ldmxcsr:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; SKX-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_ldmxcsr:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; BTVER2-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_ldmxcsr:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
 ; ZNVER1-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1262,61 +1262,61 @@ declare void @llvm.x86.sse.ldmxcsr(i8*)
 
 define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_maxps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    maxps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    maxps (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_maxps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    maxps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    maxps (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_maxps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    maxps %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    maxps (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_maxps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maxps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maxps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maxps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maxps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maxps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maxps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1329,61 +1329,61 @@ declare <4 x float> @llvm.x86.sse.max.ps
 
 define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_maxss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    maxss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    maxss (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_maxss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    maxss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    maxss (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_maxss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    maxss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    maxss (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_maxss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maxss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maxss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maxss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maxss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maxss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maxss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1396,61 +1396,61 @@ declare <4 x float> @llvm.x86.sse.max.ss
 
 define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_minps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    minps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    minps (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_minps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    minps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    minps (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_minps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    minps %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    minps (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_minps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_minps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_minps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_minps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_minps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_minps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_minps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1463,61 +1463,61 @@ declare <4 x float> @llvm.x86.sse.min.ps
 
 define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_minss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    minss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    minss (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_minss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    minss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    minss (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_minss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    minss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    minss (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_minss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_minss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_minss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_minss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_minss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_minss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_minss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1530,70 +1530,70 @@ declare <4 x float> @llvm.x86.sse.min.ss
 
 define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_movaps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movaps (%rdi), %xmm0 # sched: [6:0.50]
 ; GENERIC-NEXT:    addps %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    movaps %xmm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movaps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movaps (%rdi), %xmm0 # sched: [1:1.00]
 ; ATOM-NEXT:    addps %xmm0, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    movaps %xmm0, (%rsi) # sched: [1:1.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movaps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movaps (%rdi), %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    movaps %xmm0, (%rsi) # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movaps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovaps (%rdi), %xmm0 # sched: [6:0.50]
 ; SANDY-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovaps %xmm0, (%rsi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movaps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovaps (%rdi), %xmm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movaps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovaps (%rdi), %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movaps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovaps (%rdi), %xmm0 # sched: [6:0.50]
 ; SKYLAKE-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movaps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %xmm0 # sched: [6:0.50]
 ; SKX-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movaps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovaps (%rdi), %xmm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movaps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovaps (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:0.50]
@@ -1608,12 +1608,12 @@ define void @test_movaps(<4 x float> *%a
 
 define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
 ; GENERIC-LABEL: test_movhlps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movhlps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -1624,42 +1624,42 @@ define <4 x float> @test_movhlps(<4 x fl
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movhlps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movhlps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movhlps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movhlps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movhlps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movhlps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movhlps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movhlps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
@@ -1670,7 +1670,7 @@ define <4 x float> @test_movhlps(<4 x fl
 
 define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
 ; GENERIC-LABEL: test_movhps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
 ; GENERIC-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
 ; GENERIC-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1] sched: [1:1.00]
@@ -1678,7 +1678,7 @@ define void @test_movhps(<4 x float> %a0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movhps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
 ; ATOM-NEXT:    addps %xmm0, %xmm1 # sched: [5:5.00]
 ; ATOM-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1] sched: [1:1.00]
@@ -1686,56 +1686,56 @@ define void @test_movhps(<4 x float> %a0
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movhps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
 ; SLM-NEXT:    pextrq $1, %xmm1, (%rdi) # sched: [4:2.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movhps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [7:1.00]
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movhps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movhps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movhps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movhps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [2:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movhps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movhps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
@@ -1754,61 +1754,61 @@ define void @test_movhps(<4 x float> %a0
 
 define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
 ; GENERIC-LABEL: test_movlhps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; GENERIC-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movlhps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; ATOM-NEXT:    addps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movlhps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; SLM-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movlhps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; SANDY-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movlhps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movlhps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movlhps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movlhps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
 ; SKX-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movlhps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movlhps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1819,70 +1819,70 @@ define <4 x float> @test_movlhps(<4 x fl
 
 define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
 ; GENERIC-LABEL: test_movlps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
 ; GENERIC-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
 ; GENERIC-NEXT:    movlps %xmm1, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movlps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:1.00]
 ; ATOM-NEXT:    addps %xmm0, %xmm1 # sched: [5:5.00]
 ; ATOM-NEXT:    movlps %xmm1, (%rdi) # sched: [1:1.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movlps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [4:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
 ; SLM-NEXT:    movlps %xmm1, (%rdi) # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movlps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [7:1.00]
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovlps %xmm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movlps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movlps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movlps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movlps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movlps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movlps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:0.50]
@@ -1899,54 +1899,54 @@ define void @test_movlps(<4 x float> %a0
 
 define i32 @test_movmskps(<4 x float> %a0) {
 ; GENERIC-LABEL: test_movmskps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movmskps %xmm0, %eax # sched: [2:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movmskps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movmskps %xmm0, %eax # sched: [3:3.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movmskps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movmskps %xmm0, %eax # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movmskps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovmskps %xmm0, %eax # sched: [2:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movmskps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovmskps %xmm0, %eax # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movmskps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovmskps %xmm0, %eax # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movmskps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovmskps %xmm0, %eax # sched: [2:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movmskps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovmskps %xmm0, %eax # sched: [2:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movmskps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovmskps %xmm0, %eax # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movmskps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovmskps %xmm0, %eax # sched: [1:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
@@ -1956,12 +1956,12 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x
 
 define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_movntps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movntps %xmm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movntps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movntps %xmm0, (%rdi) # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -1972,42 +1972,42 @@ define void @test_movntps(<4 x float> %a
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movntps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movntps %xmm0, (%rdi) # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movntps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovntps %xmm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movntps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movntps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movntps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movntps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movntps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movntps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
@@ -2016,70 +2016,70 @@ define void @test_movntps(<4 x float> %a
 
 define void @test_movss_mem(float* %a0, float* %a1) {
 ; GENERIC-LABEL: test_movss_mem:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    addss %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    movss %xmm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movss_mem:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [1:1.00]
 ; ATOM-NEXT:    addss %xmm0, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    movss %xmm0, (%rsi) # sched: [1:1.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movss_mem:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [3:1.00]
 ; SLM-NEXT:    addss %xmm0, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    movss %xmm0, (%rsi) # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movss_mem:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; SANDY-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovss %xmm0, (%rsi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movss_mem:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [1:0.50]
 ; HASWELL-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movss_mem:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; BROADWELL-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movss_mem:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKYLAKE-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movss_mem:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movss_mem:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:1.00]
 ; BTVER2-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movss_mem:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:0.50]
@@ -2092,12 +2092,12 @@ define void @test_movss_mem(float* %a0,
 
 define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
 ; GENERIC-LABEL: test_movss_reg:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movss_reg:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -2108,42 +2108,42 @@ define <4 x float> @test_movss_reg(<4 x
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movss_reg:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movss_reg:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movss_reg:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movss_reg:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movss_reg:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movss_reg:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movss_reg:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movss_reg:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -2152,70 +2152,70 @@ define <4 x float> @test_movss_reg(<4 x
 
 define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_movups:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movups (%rdi), %xmm0 # sched: [6:0.50]
 ; GENERIC-NEXT:    addps %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    movups %xmm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_movups:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movups (%rdi), %xmm0 # sched: [3:1.50]
 ; ATOM-NEXT:    addps %xmm0, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    movups %xmm0, (%rsi) # sched: [2:1.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_movups:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movups (%rdi), %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    movups %xmm0, (%rsi) # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_movups:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovups (%rdi), %xmm0 # sched: [6:0.50]
 ; SANDY-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovups %xmm0, (%rsi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movups:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovups (%rdi), %xmm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movups:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovups (%rdi), %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movups:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovups (%rdi), %xmm0 # sched: [6:0.50]
 ; SKYLAKE-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movups:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %xmm0 # sched: [6:0.50]
 ; SKX-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movups:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovups (%rdi), %xmm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movups:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovups (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:0.50]
@@ -2228,61 +2228,61 @@ define void @test_movups(<4 x float> *%a
 
 define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_mulps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    mulps %xmm1, %xmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    mulps (%rdi), %xmm0 # sched: [11:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_mulps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    mulps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    mulps (%rdi), %xmm0 # sched: [10:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_mulps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    mulps %xmm1, %xmm0 # sched: [5:2.00]
 ; SLM-NEXT:    mulps (%rdi), %xmm0 # sched: [8:2.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_mulps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_mulps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_mulps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
 ; BROADWELL-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_mulps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_mulps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_mulps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_mulps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
 ; ZNVER1-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2294,61 +2294,61 @@ define <4 x float> @test_mulps(<4 x floa
 
 define float @test_mulss(float %a0, float %a1, float *%a2) {
 ; GENERIC-LABEL: test_mulss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    mulss %xmm1, %xmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    mulss (%rdi), %xmm0 # sched: [11:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_mulss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    mulss %xmm1, %xmm0 # sched: [4:4.00]
 ; ATOM-NEXT:    mulss (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_mulss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    mulss %xmm1, %xmm0 # sched: [5:2.00]
 ; SLM-NEXT:    mulss (%rdi), %xmm0 # sched: [8:2.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_mulss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_mulss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_mulss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
 ; BROADWELL-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_mulss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_mulss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_mulss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_mulss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
 ; ZNVER1-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2360,13 +2360,13 @@ define float @test_mulss(float %a0, floa
 
 define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_orps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    orps %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    orps (%rdi), %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_orps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    orps %xmm1, %xmm0 # sched: [1:0.50]
 ; ATOM-NEXT:    orps (%rdi), %xmm0 # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -2376,49 +2376,49 @@ define <4 x float> @test_orps(<4 x float
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_orps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    orps %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    orps (%rdi), %xmm0 # sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_orps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_orps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_orps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_orps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_orps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_orps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_orps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2434,12 +2434,12 @@ define <4 x float> @test_orps(<4 x float
 
 define void @test_prefetchnta(i8* %a0) {
 ; GENERIC-LABEL: test_prefetchnta:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    prefetchnta (%rdi) # sched: [5:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_prefetchnta:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    prefetchnta (%rdi) # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -2450,42 +2450,42 @@ define void @test_prefetchnta(i8* %a0) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_prefetchnta:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    prefetchnta (%rdi) # sched: [3:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_prefetchnta:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    prefetchnta (%rdi) # sched: [5:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_prefetchnta:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    prefetchnta (%rdi) # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_prefetchnta:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    prefetchnta (%rdi) # sched: [5:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_prefetchnta:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    prefetchnta (%rdi) # sched: [5:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_prefetchnta:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    prefetchnta (%rdi) # sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_prefetchnta:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    prefetchnta (%rdi) # sched: [5:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_prefetchnta:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    prefetchnta (%rdi) # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
@@ -2495,14 +2495,14 @@ declare void @llvm.prefetch(i8* nocaptur
 
 define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_rcpps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    rcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; GENERIC-NEXT:    rcpps (%rdi), %xmm0 # sched: [11:1.00]
 ; GENERIC-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_rcpps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    rcpps (%rdi), %xmm1 # sched: [10:5.00]
 ; ATOM-NEXT:    rcpps %xmm0, %xmm0 # sched: [9:4.50]
 ; ATOM-NEXT:    addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -2510,7 +2510,7 @@ define <4 x float> @test_rcpps(<4 x floa
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_rcpps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    rcpps (%rdi), %xmm1 # sched: [8:1.00]
 ; SLM-NEXT:    rcpps %xmm0, %xmm0 # sched: [5:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -2518,49 +2518,49 @@ define <4 x float> @test_rcpps(<4 x floa
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_rcpps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vrcpps %xmm0, %xmm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vrcpps (%rdi), %xmm1 # sched: [11:1.00]
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_rcpps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %xmm0, %xmm0 # sched: [5:1.00]
 ; HASWELL-NEXT:    vrcpps (%rdi), %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_rcpps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vrcpps %xmm0, %xmm0 # sched: [5:1.00]
 ; BROADWELL-NEXT:    vrcpps (%rdi), %xmm1 # sched: [10:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_rcpps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vrcpps %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vrcpps (%rdi), %xmm1 # sched: [10:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_rcpps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %xmm0, %xmm0 # sched: [4:1.00]
 ; SKX-NEXT:    vrcpps (%rdi), %xmm1 # sched: [10:1.00]
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_rcpps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vrcpps (%rdi), %xmm1 # sched: [7:1.00]
 ; BTVER2-NEXT:    vrcpps %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_rcpps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vrcpps (%rdi), %xmm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrcpps %xmm0, %xmm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -2577,7 +2577,7 @@ declare <4 x float> @llvm.x86.sse.rcp.ps
 
 define <4 x float> @test_rcpss(float %a0, float *%a1) {
 ; GENERIC-LABEL: test_rcpss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    rcpss %xmm0, %xmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    rcpss %xmm1, %xmm1 # sched: [5:1.00]
@@ -2585,7 +2585,7 @@ define <4 x float> @test_rcpss(float %a0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_rcpss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:1.00]
 ; ATOM-NEXT:    rcpss %xmm0, %xmm0
 ; ATOM-NEXT:    rcpss %xmm1, %xmm1
@@ -2593,7 +2593,7 @@ define <4 x float> @test_rcpss(float %a0
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_rcpss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [3:1.00]
 ; SLM-NEXT:    rcpss %xmm0, %xmm0 # sched: [8:1.00]
 ; SLM-NEXT:    rcpss %xmm1, %xmm1 # sched: [8:1.00]
@@ -2601,7 +2601,7 @@ define <4 x float> @test_rcpss(float %a0
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_rcpss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
 ; SANDY-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2609,7 +2609,7 @@ define <4 x float> @test_rcpss(float %a0
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_rcpss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; HASWELL-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
 ; HASWELL-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2617,7 +2617,7 @@ define <4 x float> @test_rcpss(float %a0
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_rcpss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; BROADWELL-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
 ; BROADWELL-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2625,7 +2625,7 @@ define <4 x float> @test_rcpss(float %a0
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_rcpss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKYLAKE-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2633,7 +2633,7 @@ define <4 x float> @test_rcpss(float %a0
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_rcpss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2641,7 +2641,7 @@ define <4 x float> @test_rcpss(float %a0
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_rcpss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
 ; BTVER2-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [7:1.00]
 ; BTVER2-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
@@ -2649,7 +2649,7 @@ define <4 x float> @test_rcpss(float %a0
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_rcpss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
@@ -2667,14 +2667,14 @@ declare <4 x float> @llvm.x86.sse.rcp.ss
 
 define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_rsqrtps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    rsqrtps %xmm0, %xmm1 # sched: [5:1.00]
 ; GENERIC-NEXT:    rsqrtps (%rdi), %xmm0 # sched: [11:1.00]
 ; GENERIC-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_rsqrtps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    rsqrtps (%rdi), %xmm1 # sched: [10:5.00]
 ; ATOM-NEXT:    rsqrtps %xmm0, %xmm0 # sched: [9:4.50]
 ; ATOM-NEXT:    addps %xmm0, %xmm1 # sched: [5:5.00]
@@ -2682,7 +2682,7 @@ define <4 x float> @test_rsqrtps(<4 x fl
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_rsqrtps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    rsqrtps (%rdi), %xmm1 # sched: [8:1.00]
 ; SLM-NEXT:    rsqrtps %xmm0, %xmm0 # sched: [5:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -2690,49 +2690,49 @@ define <4 x float> @test_rsqrtps(<4 x fl
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_rsqrtps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [11:1.00]
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_rsqrtps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
 ; HASWELL-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_rsqrtps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [5:1.00]
 ; BROADWELL-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [10:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_rsqrtps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [10:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_rsqrtps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [4:1.00]
 ; SKX-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [10:1.00]
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_rsqrtps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [7:1.00]
 ; BTVER2-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_rsqrtps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -2749,7 +2749,7 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
 
 define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
 ; GENERIC-LABEL: test_rsqrtss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    rsqrtss %xmm0, %xmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    rsqrtss %xmm1, %xmm1 # sched: [5:1.00]
@@ -2757,7 +2757,7 @@ define <4 x float> @test_rsqrtss(float %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_rsqrtss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:1.00]
 ; ATOM-NEXT:    rsqrtss %xmm0, %xmm0
 ; ATOM-NEXT:    rsqrtss %xmm1, %xmm1
@@ -2765,7 +2765,7 @@ define <4 x float> @test_rsqrtss(float %
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_rsqrtss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [3:1.00]
 ; SLM-NEXT:    rsqrtss %xmm0, %xmm0 # sched: [8:1.00]
 ; SLM-NEXT:    rsqrtss %xmm1, %xmm1 # sched: [8:1.00]
@@ -2773,7 +2773,7 @@ define <4 x float> @test_rsqrtss(float %
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_rsqrtss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [6:0.50]
 ; SANDY-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2781,7 +2781,7 @@ define <4 x float> @test_rsqrtss(float %
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_rsqrtss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; HASWELL-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [1:0.50]
 ; HASWELL-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2789,7 +2789,7 @@ define <4 x float> @test_rsqrtss(float %
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_rsqrtss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; BROADWELL-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
 ; BROADWELL-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
@@ -2797,7 +2797,7 @@ define <4 x float> @test_rsqrtss(float %
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_rsqrtss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKYLAKE-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2805,7 +2805,7 @@ define <4 x float> @test_rsqrtss(float %
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_rsqrtss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
@@ -2813,7 +2813,7 @@ define <4 x float> @test_rsqrtss(float %
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_rsqrtss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
 ; BTVER2-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [7:1.00]
 ; BTVER2-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
@@ -2821,7 +2821,7 @@ define <4 x float> @test_rsqrtss(float %
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_rsqrtss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:0.50]
@@ -2839,12 +2839,12 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
 
 define void @test_sfence() {
 ; GENERIC-LABEL: test_sfence:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sfence # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_sfence:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    sfence # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -2855,42 +2855,42 @@ define void @test_sfence() {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_sfence:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    sfence # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_sfence:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    sfence # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_sfence:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    sfence # sched: [1:0.33]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_sfence:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    sfence # sched: [2:0.33]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_sfence:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    sfence # sched: [2:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_sfence:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    sfence # sched: [2:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_sfence:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    sfence # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_sfence:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    sfence # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.sse.sfence()
@@ -2900,13 +2900,13 @@ declare void @llvm.x86.sse.sfence() noun
 
 define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) nounwind {
 ; GENERIC-LABEL: test_shufps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; GENERIC-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_shufps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; ATOM-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -2916,49 +2916,49 @@ define <4 x float> @test_shufps(<4 x flo
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_shufps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; SLM-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_shufps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; SANDY-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_shufps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; HASWELL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_shufps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; BROADWELL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_shufps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_shufps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:1.00]
 ; SKX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_shufps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
 ; BTVER2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_shufps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2970,21 +2970,21 @@ define <4 x float> @test_shufps(<4 x flo
 
 define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_sqrtps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sqrtps %xmm0, %xmm1 # sched: [14:1.00]
 ; GENERIC-NEXT:    sqrtps (%rdi), %xmm0 # sched: [20:1.00]
 ; GENERIC-NEXT:    addps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_sqrtps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    sqrtps %xmm0, %xmm1 # sched: [70:35.00]
 ; ATOM-NEXT:    sqrtps (%rdi), %xmm0 # sched: [70:35.00]
 ; ATOM-NEXT:    addps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_sqrtps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    sqrtps (%rdi), %xmm1 # sched: [18:1.00]
 ; SLM-NEXT:    sqrtps %xmm0, %xmm0 # sched: [15:1.00]
 ; SLM-NEXT:    addps %xmm0, %xmm1 # sched: [3:1.00]
@@ -2992,49 +2992,49 @@ define <4 x float> @test_sqrtps(<4 x flo
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_sqrtps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
 ; SANDY-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [20:1.00]
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_sqrtps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
 ; HASWELL-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [14:1.00]
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_sqrtps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [14:1.00]
 ; BROADWELL-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [19:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_sqrtps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [12:1.00]
 ; SKYLAKE-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [18:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_sqrtps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [12:1.00]
 ; SKX-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [18:1.00]
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_sqrtps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [26:21.00]
 ; BTVER2-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [21:21.00]
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_sqrtps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3051,7 +3051,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.p
 
 define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_sqrtss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    sqrtss %xmm0, %xmm0 # sched: [14:1.00]
 ; GENERIC-NEXT:    movaps (%rdi), %xmm1 # sched: [6:0.50]
 ; GENERIC-NEXT:    sqrtss %xmm1, %xmm1 # sched: [14:1.00]
@@ -3059,7 +3059,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_sqrtss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    movaps (%rdi), %xmm1 # sched: [1:1.00]
 ; ATOM-NEXT:    sqrtss %xmm0, %xmm0
 ; ATOM-NEXT:    sqrtss %xmm1, %xmm1
@@ -3067,7 +3067,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_sqrtss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    movaps (%rdi), %xmm1 # sched: [3:1.00]
 ; SLM-NEXT:    sqrtss %xmm0, %xmm0 # sched: [18:1.00]
 ; SLM-NEXT:    sqrtss %xmm1, %xmm1 # sched: [18:1.00]
@@ -3075,7 +3075,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_sqrtss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
 ; SANDY-NEXT:    vmovaps (%rdi), %xmm1 # sched: [6:0.50]
 ; SANDY-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [114:1.00]
@@ -3083,7 +3083,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_sqrtss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [14:1.00]
 ; HASWELL-NEXT:    vmovaps (%rdi), %xmm1 # sched: [1:0.50]
 ; HASWELL-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [14:1.00]
@@ -3091,7 +3091,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_sqrtss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [14:1.00]
 ; BROADWELL-NEXT:    vmovaps (%rdi), %xmm1 # sched: [5:0.50]
 ; BROADWELL-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [14:1.00]
@@ -3099,7 +3099,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_sqrtss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
 ; SKYLAKE-NEXT:    vmovaps (%rdi), %xmm1 # sched: [6:0.50]
 ; SKYLAKE-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:1.00]
@@ -3107,7 +3107,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_sqrtss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
 ; SKX-NEXT:    vmovaps (%rdi), %xmm1 # sched: [6:0.50]
 ; SKX-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:1.00]
@@ -3115,7 +3115,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_sqrtss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovaps (%rdi), %xmm1 # sched: [5:1.00]
 ; BTVER2-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [26:21.00]
 ; BTVER2-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
@@ -3123,7 +3123,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_sqrtss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovaps (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
@@ -3139,61 +3139,61 @@ declare <4 x float> @llvm.x86.sse.sqrt.s
 
 define i32 @test_stmxcsr() {
 ; GENERIC-LABEL: test_stmxcsr:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    stmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; GENERIC-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_stmxcsr:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    stmxcsr -{{[0-9]+}}(%rsp) # sched: [15:7.50]
 ; ATOM-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_stmxcsr:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    stmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; SLM-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [3:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_stmxcsr:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; SANDY-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_stmxcsr:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; HASWELL-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_stmxcsr:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
 ; BROADWELL-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_stmxcsr:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_stmxcsr:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [2:1.00]
 ; SKX-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_stmxcsr:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; BTVER2-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [5:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_stmxcsr:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
 ; ZNVER1-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3207,61 +3207,61 @@ declare void @llvm.x86.sse.stmxcsr(i8*)
 
 define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_subps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    subps %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    subps (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_subps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    subps %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    subps (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_subps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    subps %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    subps (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_subps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_subps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_subps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_subps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_subps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_subps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_subps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3273,61 +3273,61 @@ define <4 x float> @test_subps(<4 x floa
 
 define float @test_subss(float %a0, float %a1, float *%a2) {
 ; GENERIC-LABEL: test_subss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    subss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    subss (%rdi), %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_subss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    subss %xmm1, %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    subss (%rdi), %xmm0 # sched: [5:5.00]
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_subss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    subss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    subss (%rdi), %xmm0 # sched: [6:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_subss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_subss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_subss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_subss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_subss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_subss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_subss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3339,7 +3339,7 @@ define float @test_subss(float %a0, floa
 
 define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_ucomiss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    ucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    setnp %al # sched: [1:0.50]
 ; GENERIC-NEXT:    sete %cl # sched: [1:0.50]
@@ -3353,7 +3353,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_ucomiss:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    ucomiss %xmm1, %xmm0 # sched: [9:4.50]
 ; ATOM-NEXT:    setnp %al # sched: [1:0.50]
 ; ATOM-NEXT:    sete %cl # sched: [1:0.50]
@@ -3367,7 +3367,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_ucomiss:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    ucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SLM-NEXT:    setnp %al # sched: [1:0.50]
 ; SLM-NEXT:    sete %cl # sched: [1:0.50]
@@ -3381,7 +3381,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_ucomiss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    setnp %al # sched: [1:0.50]
 ; SANDY-NEXT:    sete %cl # sched: [1:0.50]
@@ -3395,7 +3395,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_ucomiss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    setnp %al # sched: [1:0.50]
 ; HASWELL-NEXT:    sete %cl # sched: [1:0.50]
@@ -3409,7 +3409,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_ucomiss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    setnp %al # sched: [1:0.50]
 ; BROADWELL-NEXT:    sete %cl # sched: [1:0.50]
@@ -3423,7 +3423,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_ucomiss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    setnp %al # sched: [1:0.50]
 ; SKYLAKE-NEXT:    sete %cl # sched: [1:0.50]
@@ -3437,7 +3437,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_ucomiss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; SKX-NEXT:    setnp %al # sched: [1:0.50]
 ; SKX-NEXT:    sete %cl # sched: [1:0.50]
@@ -3451,7 +3451,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_ucomiss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    setnp %al # sched: [1:0.50]
 ; BTVER2-NEXT:    sete %cl # sched: [1:0.50]
@@ -3465,7 +3465,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_ucomiss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vucomiss %xmm1, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    setnp %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    sete %cl # sched: [1:0.25]
@@ -3487,13 +3487,13 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4
 
 define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_unpckhps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; GENERIC-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_unpckhps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; ATOM-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -3503,49 +3503,49 @@ define <4 x float> @test_unpckhps(<4 x f
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_unpckhps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; SLM-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_unpckhps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; SANDY-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_unpckhps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; HASWELL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_unpckhps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; BROADWELL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_unpckhps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_unpckhps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
 ; SKX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_unpckhps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
 ; BTVER2-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_unpckhps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3557,13 +3557,13 @@ define <4 x float> @test_unpckhps(<4 x f
 
 define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_unpcklps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; GENERIC-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_unpcklps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; ATOM-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -3573,49 +3573,49 @@ define <4 x float> @test_unpcklps(<4 x f
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_unpcklps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; SLM-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_unpcklps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; SANDY-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_unpcklps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; HASWELL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_unpcklps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; BROADWELL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_unpcklps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_unpcklps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
 ; SKX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_unpcklps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
 ; BTVER2-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_unpcklps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3627,13 +3627,13 @@ define <4 x float> @test_unpcklps(<4 x f
 
 define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_xorps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorps %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    xorps (%rdi), %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_xorps:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    xorps %xmm1, %xmm0 # sched: [1:0.50]
 ; ATOM-NEXT:    xorps (%rdi), %xmm0 # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -3643,49 +3643,49 @@ define <4 x float> @test_xorps(<4 x floa
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_xorps:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    xorps %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    xorps (%rdi), %xmm0 # sched: [4:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_xorps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_xorps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_xorps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_xorps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_xorps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_xorps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_xorps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]

Modified: llvm/trunk/test/CodeGen/X86/sse1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse1.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@
 ; rdar://8368414
 define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
 ; X32-LABEL: test4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movaps %xmm0, %xmm2
 ; X32-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; X32-NEXT:    addss %xmm1, %xmm0
@@ -24,7 +24,7 @@ define <2 x float> @test4(<2 x float> %A
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movaps %xmm0, %xmm2
 ; X64-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; X64-NEXT:    addss %xmm1, %xmm0
@@ -52,11 +52,11 @@ entry:
 
 define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
 ; X32-LABEL: vselect:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
 ; X32-NEXT:    xorps %xmm0, %xmm0
 ; X32-NEXT:    je .LBB1_1
-; X32-NEXT:  # BB#2: # %entry
+; X32-NEXT:  # %bb.2: # %entry
 ; X32-NEXT:    xorps %xmm1, %xmm1
 ; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
 ; X32-NEXT:    jne .LBB1_5
@@ -91,11 +91,11 @@ define <4 x float> @vselect(<4 x float>*
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: vselect:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    testl %edx, %edx
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    je .LBB1_1
-; X64-NEXT:  # BB#2: # %entry
+; X64-NEXT:  # %bb.2: # %entry
 ; X64-NEXT:    xorps %xmm1, %xmm1
 ; X64-NEXT:    testl %ecx, %ecx
 ; X64-NEXT:    jne .LBB1_5
@@ -138,12 +138,12 @@ entry:
 
 define <4 x float> @PR28044(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: PR28044:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpeqps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR28044:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpeqps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %cmp = fcmp oeq <4 x float> %a0, %a1
@@ -157,7 +157,7 @@ define <4 x float> @PR28044(<4 x float>
 
 define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X32-LABEL: PR30512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
@@ -203,7 +203,7 @@ define <4 x i32> @PR30512(<4 x i32> %x,
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: PR30512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpl {{[0-9]+}}(%rsp), %r8d
 ; X64-NEXT:    sete %al
@@ -251,12 +251,12 @@ define <4 x i32> @PR30512(<4 x i32> %x,
 
 define <2 x float> @PR31672() #0 {
 ; X32-LABEL: PR31672:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    sqrtps {{\.LCPI.*}}, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR31672:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtps {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %t0 = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> <float 42.0, float 3.0>)

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define i64 @test_mm_cvtsd_si64(<2 x double> %a0) nounwind {
 ; X64-LABEL: test_mm_cvtsd_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsd2si %xmm0, %rax
 ; X64-NEXT:    retq
   %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
@@ -15,7 +15,7 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2
 
 define i64 @test_mm_cvtsi128_si64(<2 x i64> %a0) nounwind {
 ; X64-LABEL: test_mm_cvtsi128_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %xmm0, %rax
 ; X64-NEXT:    retq
   %res = extractelement <2 x i64> %a0, i32 0
@@ -24,7 +24,7 @@ define i64 @test_mm_cvtsi128_si64(<2 x i
 
 define <2 x double> @test_mm_cvtsi64_sd(<2 x double> %a0, i64 %a1) nounwind {
 ; X64-LABEL: test_mm_cvtsi64_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsi2sdq %rdi, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1)
@@ -34,7 +34,7 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 define <2 x i64> @test_mm_cvtsi64_si128(i64 %a0) nounwind {
 ; X64-LABEL: test_mm_cvtsi64_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %xmm0
 ; X64-NEXT:    retq
   %res0 = insertelement <2 x i64> undef, i64 %a0, i32 0
@@ -44,7 +44,7 @@ define <2 x i64> @test_mm_cvtsi64_si128(
 
 define i64 @test_mm_cvttsd_si64(<2 x double> %a0) nounwind {
 ; X64-LABEL: test_mm_cvttsd_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttsd2si %xmm0, %rax
 ; X64-NEXT:    retq
   %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
@@ -54,7 +54,7 @@ declare i64 @llvm.x86.sse2.cvttsd2si64(<
 
 define <2 x i64> @test_mm_loadu_si64(i64* %a0) nounwind {
 ; X64-LABEL: test_mm_loadu_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    retq
   %ld = load i64, i64* %a0, align 1
@@ -65,7 +65,7 @@ define <2 x i64> @test_mm_loadu_si64(i64
 
 define void @test_mm_stream_si64(i64 *%a0, i64 %a1) {
 ; X64-LABEL: test_mm_stream_si64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movntiq %rsi, (%rdi)
 ; X64-NEXT:    retq
   store i64 %a1, i64* %a0, align 1, !nontemporal !0

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <2 x i64> @test_mm_add_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_add_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -23,12 +23,12 @@ define <2 x i64> @test_mm_add_epi8(<2 x
 
 define <2 x i64> @test_mm_add_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_add_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -40,12 +40,12 @@ define <2 x i64> @test_mm_add_epi16(<2 x
 
 define <2 x i64> @test_mm_add_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_add_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -57,12 +57,12 @@ define <2 x i64> @test_mm_add_epi32(<2 x
 
 define <2 x i64> @test_mm_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_add_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddq %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddq %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = add <2 x i64> %a0, %a1
@@ -71,12 +71,12 @@ define <2 x i64> @test_mm_add_epi64(<2 x
 
 define <2 x double> @test_mm_add_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_add_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    addpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fadd <2 x double> %a0, %a1
@@ -85,12 +85,12 @@ define <2 x double> @test_mm_add_pd(<2 x
 
 define <2 x double> @test_mm_add_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_add_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    addsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_add_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <2 x double> %a0, i32 0
@@ -102,12 +102,12 @@ define <2 x double> @test_mm_add_sd(<2 x
 
 define <2 x i64> @test_mm_adds_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_adds_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddsb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_adds_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddsb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -120,12 +120,12 @@ declare <16 x i8> @llvm.x86.sse2.padds.b
 
 define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_adds_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddsw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_adds_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddsw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -138,12 +138,12 @@ declare <8 x i16> @llvm.x86.sse2.padds.w
 
 define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_adds_epu8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddusb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_adds_epu8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddusb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -156,12 +156,12 @@ declare <16 x i8> @llvm.x86.sse2.paddus.
 
 define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_adds_epu16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    paddusw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_adds_epu16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddusw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -174,12 +174,12 @@ declare <8 x i16> @llvm.x86.sse2.paddus.
 
 define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_and_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    andps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_and_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -191,12 +191,12 @@ define <2 x double> @test_mm_and_pd(<2 x
 
 define <2 x i64> @test_mm_and_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_and_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    andps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_and_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = and <2 x i64> %a0, %a1
@@ -205,12 +205,12 @@ define <2 x i64> @test_mm_and_si128(<2 x
 
 define <2 x double> @test_mm_andnot_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_andnot_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    andnps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_andnot_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andnps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -223,14 +223,14 @@ define <2 x double> @test_mm_andnot_pd(<
 
 define <2 x i64> @test_mm_andnot_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_andnot_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpeqd %xmm2, %xmm2
 ; X32-NEXT:    pxor %xmm2, %xmm0
 ; X32-NEXT:    pand %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_andnot_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpeqd %xmm2, %xmm2
 ; X64-NEXT:    pxor %xmm2, %xmm0
 ; X64-NEXT:    pand %xmm1, %xmm0
@@ -242,12 +242,12 @@ define <2 x i64> @test_mm_andnot_si128(<
 
 define <2 x i64> @test_mm_avg_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_avg_epu8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pavgb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_avg_epu8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pavgb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -264,12 +264,12 @@ define <2 x i64> @test_mm_avg_epu8(<2 x
 
 define <2 x i64> @test_mm_avg_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_avg_epu16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pavgw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_avg_epu16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pavgw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -286,12 +286,12 @@ define <2 x i64> @test_mm_avg_epu16(<2 x
 
 define <2 x i64> @test_mm_bslli_si128(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_bslli_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_bslli_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -302,12 +302,12 @@ define <2 x i64> @test_mm_bslli_si128(<2
 
 define <2 x i64> @test_mm_bsrli_si128(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_bsrli_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_bsrli_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -318,11 +318,11 @@ define <2 x i64> @test_mm_bsrli_si128(<2
 
 define <4 x float> @test_mm_castpd_ps(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_castpd_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_castpd_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <2 x double> %a0 to <4 x float>
   ret <4 x float> %res
@@ -330,11 +330,11 @@ define <4 x float> @test_mm_castpd_ps(<2
 
 define <2 x i64> @test_mm_castpd_si128(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_castpd_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_castpd_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <2 x double> %a0 to <2 x i64>
   ret <2 x i64> %res
@@ -342,11 +342,11 @@ define <2 x i64> @test_mm_castpd_si128(<
 
 define <2 x double> @test_mm_castps_pd(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_castps_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_castps_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <4 x float> %a0 to <2 x double>
   ret <2 x double> %res
@@ -354,11 +354,11 @@ define <2 x double> @test_mm_castps_pd(<
 
 define <2 x i64> @test_mm_castps_si128(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_castps_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_castps_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <4 x float> %a0 to <2 x i64>
   ret <2 x i64> %res
@@ -366,11 +366,11 @@ define <2 x i64> @test_mm_castps_si128(<
 
 define <2 x double> @test_mm_castsi128_pd(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_castsi128_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_castsi128_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <2 x i64> %a0 to <2 x double>
   ret <2 x double> %res
@@ -378,11 +378,11 @@ define <2 x double> @test_mm_castsi128_p
 
 define <4 x float> @test_mm_castsi128_ps(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_castsi128_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_castsi128_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <2 x i64> %a0 to <4 x float>
   ret <4 x float> %res
@@ -390,13 +390,13 @@ define <4 x float> @test_mm_castsi128_ps
 
 define void @test_mm_clflush(i8* %a0) nounwind {
 ; X32-LABEL: test_mm_clflush:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    clflush (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_clflush:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    clflush (%rdi)
 ; X64-NEXT:    retq
   call void @llvm.x86.sse2.clflush(i8* %a0)
@@ -406,12 +406,12 @@ declare void @llvm.x86.sse2.clflush(i8*)
 
 define <2 x i64> @test_mm_cmpeq_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpeqb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpeqb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -424,12 +424,12 @@ define <2 x i64> @test_mm_cmpeq_epi8(<2
 
 define <2 x i64> @test_mm_cmpeq_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpeqw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpeqw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -442,12 +442,12 @@ define <2 x i64> @test_mm_cmpeq_epi16(<2
 
 define <2 x i64> @test_mm_cmpeq_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpeqd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpeqd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -460,12 +460,12 @@ define <2 x i64> @test_mm_cmpeq_epi32(<2
 
 define <2 x double> @test_mm_cmpeq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpeqpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpeqpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp oeq <2 x double> %a0, %a1
@@ -476,12 +476,12 @@ define <2 x double> @test_mm_cmpeq_pd(<2
 
 define <2 x double> @test_mm_cmpeq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpeqsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpeq_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpeqsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
@@ -491,13 +491,13 @@ declare <2 x double> @llvm.x86.sse2.cmp.
 
 define <2 x double> @test_mm_cmpge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpge_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmplepd %xmm0, %xmm1
 ; X32-NEXT:    movapd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpge_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmplepd %xmm0, %xmm1
 ; X64-NEXT:    movapd %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -509,13 +509,13 @@ define <2 x double> @test_mm_cmpge_pd(<2
 
 define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpge_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmplesd %xmm0, %xmm1
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpge_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmplesd %xmm0, %xmm1
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64-NEXT:    retq
@@ -529,12 +529,12 @@ define <2 x double> @test_mm_cmpge_sd(<2
 
 define <2 x i64> @test_mm_cmpgt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpgtb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpgtb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -547,12 +547,12 @@ define <2 x i64> @test_mm_cmpgt_epi8(<2
 
 define <2 x i64> @test_mm_cmpgt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpgtw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpgtw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -565,12 +565,12 @@ define <2 x i64> @test_mm_cmpgt_epi16(<2
 
 define <2 x i64> @test_mm_cmpgt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpgtd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpgtd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -583,13 +583,13 @@ define <2 x i64> @test_mm_cmpgt_epi32(<2
 
 define <2 x double> @test_mm_cmpgt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltpd %xmm0, %xmm1
 ; X32-NEXT:    movapd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltpd %xmm0, %xmm1
 ; X64-NEXT:    movapd %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -601,13 +601,13 @@ define <2 x double> @test_mm_cmpgt_pd(<2
 
 define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpgt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltsd %xmm0, %xmm1
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpgt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltsd %xmm0, %xmm1
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64-NEXT:    retq
@@ -621,12 +621,12 @@ define <2 x double> @test_mm_cmpgt_sd(<2
 
 define <2 x double> @test_mm_cmple_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmple_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmplepd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmple_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmplepd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp ole <2 x double> %a0, %a1
@@ -637,12 +637,12 @@ define <2 x double> @test_mm_cmple_pd(<2
 
 define <2 x double> @test_mm_cmple_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmple_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmplesd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmple_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmplesd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 2)
@@ -651,13 +651,13 @@ define <2 x double> @test_mm_cmple_sd(<2
 
 define <2 x i64> @test_mm_cmplt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpgtb %xmm0, %xmm1
 ; X32-NEXT:    movdqa %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpgtb %xmm0, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -671,13 +671,13 @@ define <2 x i64> @test_mm_cmplt_epi8(<2
 
 define <2 x i64> @test_mm_cmplt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpgtw %xmm0, %xmm1
 ; X32-NEXT:    movdqa %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpgtw %xmm0, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -691,13 +691,13 @@ define <2 x i64> @test_mm_cmplt_epi16(<2
 
 define <2 x i64> @test_mm_cmplt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pcmpgtd %xmm0, %xmm1
 ; X32-NEXT:    movdqa %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pcmpgtd %xmm0, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -711,12 +711,12 @@ define <2 x i64> @test_mm_cmplt_epi32(<2
 
 define <2 x double> @test_mm_cmplt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp olt <2 x double> %a0, %a1
@@ -727,12 +727,12 @@ define <2 x double> @test_mm_cmplt_pd(<2
 
 define <2 x double> @test_mm_cmplt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmplt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpltsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmplt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpltsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 1)
@@ -741,12 +741,12 @@ define <2 x double> @test_mm_cmplt_sd(<2
 
 define <2 x double> @test_mm_cmpneq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpneq_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpneqpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpneq_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpneqpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp une <2 x double> %a0, %a1
@@ -757,12 +757,12 @@ define <2 x double> @test_mm_cmpneq_pd(<
 
 define <2 x double> @test_mm_cmpneq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpneq_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpneqsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpneq_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpneqsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 4)
@@ -771,13 +771,13 @@ define <2 x double> @test_mm_cmpneq_sd(<
 
 define <2 x double> @test_mm_cmpnge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnge_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnlepd %xmm0, %xmm1
 ; X32-NEXT:    movapd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnge_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnlepd %xmm0, %xmm1
 ; X64-NEXT:    movapd %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -789,13 +789,13 @@ define <2 x double> @test_mm_cmpnge_pd(<
 
 define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnge_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnlesd %xmm0, %xmm1
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnge_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnlesd %xmm0, %xmm1
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64-NEXT:    retq
@@ -809,13 +809,13 @@ define <2 x double> @test_mm_cmpnge_sd(<
 
 define <2 x double> @test_mm_cmpngt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpngt_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltpd %xmm0, %xmm1
 ; X32-NEXT:    movapd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpngt_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltpd %xmm0, %xmm1
 ; X64-NEXT:    movapd %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -827,13 +827,13 @@ define <2 x double> @test_mm_cmpngt_pd(<
 
 define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpngt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltsd %xmm0, %xmm1
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpngt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltsd %xmm0, %xmm1
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64-NEXT:    retq
@@ -847,12 +847,12 @@ define <2 x double> @test_mm_cmpngt_sd(<
 
 define <2 x double> @test_mm_cmpnle_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnle_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnlepd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnle_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnlepd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp ugt <2 x double> %a0, %a1
@@ -863,12 +863,12 @@ define <2 x double> @test_mm_cmpnle_pd(<
 
 define <2 x double> @test_mm_cmpnle_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnle_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnlesd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnle_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnlesd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 6)
@@ -877,12 +877,12 @@ define <2 x double> @test_mm_cmpnle_sd(<
 
 define <2 x double> @test_mm_cmpnlt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnlt_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnlt_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp uge <2 x double> %a0, %a1
@@ -893,12 +893,12 @@ define <2 x double> @test_mm_cmpnlt_pd(<
 
 define <2 x double> @test_mm_cmpnlt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpnlt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpnltsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpnlt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpnltsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 5)
@@ -907,12 +907,12 @@ define <2 x double> @test_mm_cmpnlt_sd(<
 
 define <2 x double> @test_mm_cmpord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpord_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpordpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpord_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpordpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp ord <2 x double> %a0, %a1
@@ -923,12 +923,12 @@ define <2 x double> @test_mm_cmpord_pd(<
 
 define <2 x double> @test_mm_cmpord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpord_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpordsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpord_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpordsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7)
@@ -937,12 +937,12 @@ define <2 x double> @test_mm_cmpord_sd(<
 
 define <2 x double> @test_mm_cmpunord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpunord_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpunordpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpunord_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpunordpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %fcmp = fcmp uno <2 x double> %a0, %a1
@@ -953,12 +953,12 @@ define <2 x double> @test_mm_cmpunord_pd
 
 define <2 x double> @test_mm_cmpunord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpunord_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cmpunordsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmpunord_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpunordsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 3)
@@ -967,7 +967,7 @@ define <2 x double> @test_mm_cmpunord_sd
 
 define i32 @test_mm_comieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_comieq_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    comisd %xmm1, %xmm0
 ; X32-NEXT:    setnp %al
 ; X32-NEXT:    sete %cl
@@ -976,7 +976,7 @@ define i32 @test_mm_comieq_sd(<2 x doubl
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comieq_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    comisd %xmm1, %xmm0
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    sete %cl
@@ -990,14 +990,14 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2
 
 define i32 @test_mm_comige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_comige_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comisd %xmm1, %xmm0
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comige_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comisd %xmm1, %xmm0
 ; X64-NEXT:    setae %al
@@ -1009,14 +1009,14 @@ declare i32 @llvm.x86.sse2.comige.sd(<2
 
 define i32 @test_mm_comigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_comigt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comisd %xmm1, %xmm0
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comigt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comisd %xmm1, %xmm0
 ; X64-NEXT:    seta %al
@@ -1028,14 +1028,14 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2
 
 define i32 @test_mm_comile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_comile_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comisd %xmm0, %xmm1
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comile_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comisd %xmm0, %xmm1
 ; X64-NEXT:    setae %al
@@ -1047,14 +1047,14 @@ declare i32 @llvm.x86.sse2.comile.sd(<2
 
 define i32 @test_mm_comilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_comilt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    comisd %xmm0, %xmm1
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comilt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    comisd %xmm0, %xmm1
 ; X64-NEXT:    seta %al
@@ -1066,7 +1066,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2
 
 define i32 @test_mm_comineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_comineq_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    comisd %xmm1, %xmm0
 ; X32-NEXT:    setp %al
 ; X32-NEXT:    setne %cl
@@ -1075,7 +1075,7 @@ define i32 @test_mm_comineq_sd(<2 x doub
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_comineq_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    comisd %xmm1, %xmm0
 ; X64-NEXT:    setp %al
 ; X64-NEXT:    setne %cl
@@ -1089,12 +1089,12 @@ declare i32 @llvm.x86.sse2.comineq.sd(<2
 
 define <2 x double> @test_mm_cvtepi32_pd(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtepi32_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtepi32_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1105,12 +1105,12 @@ define <2 x double> @test_mm_cvtepi32_pd
 
 define <4 x float> @test_mm_cvtepi32_ps(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtepi32_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtepi32_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1121,12 +1121,12 @@ declare <4 x float> @llvm.x86.sse2.cvtdq
 
 define <2 x i64> @test_mm_cvtpd_epi32(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtpd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtpd2dq %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtpd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtpd2dq %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
@@ -1137,12 +1137,12 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2d
 
 define <4 x float> @test_mm_cvtpd_ps(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtpd_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtpd2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtpd_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtpd2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
@@ -1152,12 +1152,12 @@ declare <4 x float> @llvm.x86.sse2.cvtpd
 
 define <2 x i64> @test_mm_cvtps_epi32(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtps_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtps2dq %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtps_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtps2dq %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
@@ -1168,12 +1168,12 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2d
 
 define <2 x double> @test_mm_cvtps_pd(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtps_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtps2pd %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtps_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtps2pd %xmm0, %xmm0
 ; X64-NEXT:    retq
   %ext = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
@@ -1183,7 +1183,7 @@ define <2 x double> @test_mm_cvtps_pd(<4
 
 define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtsd_f64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-8, %esp
@@ -1195,7 +1195,7 @@ define double @test_mm_cvtsd_f64(<2 x do
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsd_f64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = extractelement <2 x double> %a0, i32 0
   ret double %res
@@ -1203,12 +1203,12 @@ define double @test_mm_cvtsd_f64(<2 x do
 
 define i32 @test_mm_cvtsd_si32(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtsd_si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtsd2si %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsd_si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsd2si %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
@@ -1218,12 +1218,12 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x
 
 define <4 x float> @test_mm_cvtsd_ss(<4 x float> %a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_cvtsd_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtsd2ss %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsd_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsd2ss %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
@@ -1233,13 +1233,13 @@ declare <4 x float> @llvm.x86.sse2.cvtsd
 
 define <4 x float> @test_mm_cvtsd_ss_load(<4 x float> %a0, <2 x double>* %p1) {
 ; X32-LABEL: test_mm_cvtsd_ss_load:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    cvtsd2ss (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsd_ss_load:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsd2ss (%rdi), %xmm0
 ; X64-NEXT:    retq
   %a1 = load <2 x double>, <2 x double>* %p1
@@ -1249,12 +1249,12 @@ define <4 x float> @test_mm_cvtsd_ss_loa
 
 define i32 @test_mm_cvtsi128_si32(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_cvtsi128_si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movd %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsi128_si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %xmm0, %eax
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1264,12 +1264,12 @@ define i32 @test_mm_cvtsi128_si32(<2 x i
 
 define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
 ; X32-LABEL: test_mm_cvtsi32_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsi32_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtsi2sdl %edi, %xmm0
 ; X64-NEXT:    retq
   %cvt = sitofp i32 %a1 to double
@@ -1279,12 +1279,12 @@ define <2 x double> @test_mm_cvtsi32_sd(
 
 define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
 ; X32-LABEL: test_mm_cvtsi32_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtsi32_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %edi, %xmm0
 ; X64-NEXT:    retq
   %res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
@@ -1297,12 +1297,12 @@ define <2 x i64> @test_mm_cvtsi32_si128(
 
 define <2 x double> @test_mm_cvtss_sd(<2 x double> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cvtss_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvtss2sd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvtss_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtss2sd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext = extractelement <4 x float> %a1, i32 0
@@ -1313,12 +1313,12 @@ define <2 x double> @test_mm_cvtss_sd(<2
 
 define <2 x i64> @test_mm_cvttpd_epi32(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_cvttpd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvttpd2dq %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvttpd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttpd2dq %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
@@ -1329,12 +1329,12 @@ declare <4 x i32> @llvm.x86.sse2.cvttpd2
 
 define <2 x i64> @test_mm_cvttps_epi32(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_cvttps_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvttps2dq %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvttps_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttps2dq %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0)
@@ -1345,12 +1345,12 @@ declare <4 x i32> @llvm.x86.sse2.cvttps2
 
 define i32 @test_mm_cvttsd_si32(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_cvttsd_si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    cvttsd2si %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cvttsd_si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttsd2si %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
@@ -1360,12 +1360,12 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2
 
 define <2 x double> @test_mm_div_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_div_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    divpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_div_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    divpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fdiv <2 x double> %a0, %a1
@@ -1374,12 +1374,12 @@ define <2 x double> @test_mm_div_pd(<2 x
 
 define <2 x double> @test_mm_div_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_div_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    divsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_div_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    divsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <2 x double> %a0, i32 0
@@ -1391,13 +1391,13 @@ define <2 x double> @test_mm_div_sd(<2 x
 
 define i32 @test_mm_extract_epi16(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_extract_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pextrw $1, %xmm0, %eax
 ; X32-NEXT:    movzwl %ax, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_extract_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pextrw $1, %xmm0, %eax
 ; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    retq
@@ -1409,13 +1409,13 @@ define i32 @test_mm_extract_epi16(<2 x i
 
 define <2 x i64> @test_mm_insert_epi16(<2 x i64> %a0, i16 %a1) nounwind {
 ; X32-LABEL: test_mm_insert_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    pinsrw $1, %eax, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_insert_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pinsrw $1, %edi, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1426,12 +1426,12 @@ define <2 x i64> @test_mm_insert_epi16(<
 
 define void @test_mm_lfence() nounwind {
 ; X32-LABEL: test_mm_lfence:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    lfence
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_lfence:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lfence
 ; X64-NEXT:    retq
   call void @llvm.x86.sse2.lfence()
@@ -1441,13 +1441,13 @@ declare void @llvm.x86.sse2.lfence() nou
 
 define <2 x double> @test_mm_load_pd(double* %a0) nounwind {
 ; X32-LABEL: test_mm_load_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps (%rdi), %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
@@ -1457,13 +1457,13 @@ define <2 x double> @test_mm_load_pd(dou
 
 define <2 x double> @test_mm_load_sd(double* %a0) nounwind {
 ; X32-LABEL: test_mm_load_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    retq
   %ld = load double, double* %a0, align 1
@@ -1474,13 +1474,13 @@ define <2 x double> @test_mm_load_sd(dou
 
 define <2 x i64> @test_mm_load_si128(<2 x i64>* %a0) nounwind {
 ; X32-LABEL: test_mm_load_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps (%rdi), %xmm0
 ; X64-NEXT:    retq
   %res = load <2 x i64>, <2 x i64>* %a0, align 16
@@ -1489,14 +1489,14 @@ define <2 x i64> @test_mm_load_si128(<2
 
 define <2 x double> @test_mm_load1_pd(double* %a0) nounwind {
 ; X32-LABEL: test_mm_load1_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_load1_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X64-NEXT:    retq
@@ -1508,13 +1508,13 @@ define <2 x double> @test_mm_load1_pd(do
 
 define <2 x double> @test_mm_loadh_pd(<2 x double> %a0, double* %a1) nounwind {
 ; X32-LABEL: test_mm_loadh_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadh_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; X64-NEXT:    retq
   %ld = load double, double* %a1, align 8
@@ -1524,13 +1524,13 @@ define <2 x double> @test_mm_loadh_pd(<2
 
 define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, <2 x i64>* %a1) nounwind {
 ; X32-LABEL: test_mm_loadl_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadl_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    retq
   %bc = bitcast <2 x i64>* %a1 to i64*
@@ -1542,13 +1542,13 @@ define <2 x i64> @test_mm_loadl_epi64(<2
 
 define <2 x double> @test_mm_loadl_pd(<2 x double> %a0, double* %a1) nounwind {
 ; X32-LABEL: test_mm_loadl_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadl_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
 ; X64-NEXT:    retq
   %ld = load double, double* %a1, align 8
@@ -1558,14 +1558,14 @@ define <2 x double> @test_mm_loadl_pd(<2
 
 define <2 x double> @test_mm_loadr_pd(double* %a0) nounwind {
 ; X32-LABEL: test_mm_loadr_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movapd (%eax), %xmm0
 ; X32-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadr_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movapd (%rdi), %xmm0
 ; X64-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
 ; X64-NEXT:    retq
@@ -1577,13 +1577,13 @@ define <2 x double> @test_mm_loadr_pd(do
 
 define <2 x double> @test_mm_loadu_pd(double* %a0) nounwind {
 ; X32-LABEL: test_mm_loadu_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadu_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movups (%rdi), %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
@@ -1593,13 +1593,13 @@ define <2 x double> @test_mm_loadu_pd(do
 
 define <2 x i64> @test_mm_loadu_si128(<2 x i64>* %a0) nounwind {
 ; X32-LABEL: test_mm_loadu_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_loadu_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movups (%rdi), %xmm0
 ; X64-NEXT:    retq
   %res = load <2 x i64>, <2 x i64>* %a0, align 1
@@ -1608,12 +1608,12 @@ define <2 x i64> @test_mm_loadu_si128(<2
 
 define <2 x i64> @test_mm_madd_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_madd_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmaddwd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_madd_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmaddwd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1626,7 +1626,7 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.w
 
 define void @test_mm_maskmoveu_si128(<2 x i64> %a0, <2 x i64> %a1, i8* %a2) nounwind {
 ; X32-LABEL: test_mm_maskmoveu_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT:    maskmovdqu %xmm1, %xmm0
@@ -1634,7 +1634,7 @@ define void @test_mm_maskmoveu_si128(<2
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskmoveu_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    maskmovdqu %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1646,12 +1646,12 @@ declare void @llvm.x86.sse2.maskmov.dqu(
 
 define <2 x i64> @test_mm_max_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_max_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmaxsw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_max_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmaxsw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1664,12 +1664,12 @@ define <2 x i64> @test_mm_max_epi16(<2 x
 
 define <2 x i64> @test_mm_max_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_max_epu8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmaxub %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_max_epu8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmaxub %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1682,12 +1682,12 @@ define <2 x i64> @test_mm_max_epu8(<2 x
 
 define <2 x double> @test_mm_max_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_max_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    maxpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_max_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    maxpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
@@ -1697,12 +1697,12 @@ declare <2 x double> @llvm.x86.sse2.max.
 
 define <2 x double> @test_mm_max_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_max_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    maxsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_max_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    maxsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
@@ -1712,12 +1712,12 @@ declare <2 x double> @llvm.x86.sse2.max.
 
 define void @test_mm_mfence() nounwind {
 ; X32-LABEL: test_mm_mfence:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    mfence
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mfence:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mfence
 ; X64-NEXT:    retq
   call void @llvm.x86.sse2.mfence()
@@ -1727,12 +1727,12 @@ declare void @llvm.x86.sse2.mfence() nou
 
 define <2 x i64> @test_mm_min_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_min_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pminsw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_min_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pminsw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1745,12 +1745,12 @@ define <2 x i64> @test_mm_min_epi16(<2 x
 
 define <2 x i64> @test_mm_min_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_min_epu8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pminub %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_min_epu8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pminub %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1763,12 +1763,12 @@ define <2 x i64> @test_mm_min_epu8(<2 x
 
 define <2 x double> @test_mm_min_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_min_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    minpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_min_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    minpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
@@ -1778,12 +1778,12 @@ declare <2 x double> @llvm.x86.sse2.min.
 
 define <2 x double> @test_mm_min_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_min_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    minsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_min_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    minsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
@@ -1793,12 +1793,12 @@ declare <2 x double> @llvm.x86.sse2.min.
 
 define <2 x i64> @test_mm_move_epi64(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_move_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_move_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
@@ -1807,12 +1807,12 @@ define <2 x i64> @test_mm_move_epi64(<2
 
 define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_move_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_move_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64-NEXT:    retq
   %ext0 = extractelement <2 x double> %a1, i32 0
@@ -1824,12 +1824,12 @@ define <2 x double> @test_mm_move_sd(<2
 
 define i32 @test_mm_movemask_epi8(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_movemask_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmovmskb %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_movemask_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmovmskb %xmm0, %eax
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -1840,12 +1840,12 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(
 
 define i32 @test_mm_movemask_pd(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_movemask_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movmskpd %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_movemask_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movmskpd %xmm0, %eax
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
@@ -1855,12 +1855,12 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2
 
 define <2 x i64> @test_mm_mul_epu32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_mul_epu32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmuludq %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mul_epu32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmuludq %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1872,12 +1872,12 @@ declare <2 x i64> @llvm.x86.sse2.pmulu.d
 
 define <2 x double> @test_mm_mul_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_mul_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    mulpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mul_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mulpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fmul <2 x double> %a0, %a1
@@ -1886,12 +1886,12 @@ define <2 x double> @test_mm_mul_pd(<2 x
 
 define <2 x double> @test_mm_mul_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_mul_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    mulsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mul_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mulsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <2 x double> %a0, i32 0
@@ -1903,12 +1903,12 @@ define <2 x double> @test_mm_mul_sd(<2 x
 
 define <2 x i64> @test_mm_mulhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_mulhi_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmulhw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mulhi_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmulhw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1921,12 +1921,12 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w
 
 define <2 x i64> @test_mm_mulhi_epu16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_mulhi_epu16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmulhuw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mulhi_epu16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmulhuw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1939,12 +1939,12 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.
 
 define <2 x i64> @test_mm_mullo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_mullo_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pmullw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mullo_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pmullw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -1956,12 +1956,12 @@ define <2 x i64> @test_mm_mullo_epi16(<2
 
 define <2 x double> @test_mm_or_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_or_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    orps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_or_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -1973,12 +1973,12 @@ define <2 x double> @test_mm_or_pd(<2 x
 
 define <2 x i64> @test_mm_or_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_or_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    orps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_or_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = or <2 x i64> %a0, %a1
@@ -1987,12 +1987,12 @@ define <2 x i64> @test_mm_or_si128(<2 x
 
 define <2 x i64> @test_mm_packs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_packs_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    packsswb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_packs_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    packsswb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2005,12 +2005,12 @@ declare <16 x i8> @llvm.x86.sse2.packssw
 
 define <2 x i64> @test_mm_packs_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_packs_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    packssdw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_packs_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    packssdw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2023,12 +2023,12 @@ declare <8 x i16> @llvm.x86.sse2.packssd
 
 define <2 x i64> @test_mm_packus_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_packus_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    packuswb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_packus_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    packuswb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2041,12 +2041,12 @@ declare <16 x i8> @llvm.x86.sse2.packusw
 
 define void @test_mm_pause() nounwind {
 ; X32-LABEL: test_mm_pause:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pause
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_pause:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pause
 ; X64-NEXT:    retq
   call void @llvm.x86.sse2.pause()
@@ -2056,12 +2056,12 @@ declare void @llvm.x86.sse2.pause() noun
 
 define <2 x i64> @test_mm_sad_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_sad_epu8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psadbw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sad_epu8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psadbw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -2073,7 +2073,7 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw
 
 define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
 ; X32-LABEL: test_mm_set_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd %eax, %xmm0
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
@@ -2124,7 +2124,7 @@ define <2 x i64> @test_mm_set_epi8(i8 %a
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    movd %eax, %xmm0
 ; X64-NEXT:    movzbl %sil, %eax
@@ -2195,7 +2195,7 @@ define <2 x i64> @test_mm_set_epi8(i8 %a
 
 define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
 ; X32-LABEL: test_mm_set_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd %eax, %xmm1
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
@@ -2222,7 +2222,7 @@ define <2 x i64> @test_mm_set_epi16(i16
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    movd %edi, %xmm0
@@ -2255,7 +2255,7 @@ define <2 x i64> @test_mm_set_epi16(i16
 
 define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
 ; X32-LABEL: test_mm_set_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2266,7 +2266,7 @@ define <2 x i64> @test_mm_set_epi32(i32
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %edi, %xmm0
 ; X64-NEXT:    movd %esi, %xmm1
 ; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2287,7 +2287,7 @@ define <2 x i64> @test_mm_set_epi32(i32
 
 define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
 ; X32-LABEL: test_mm_set_epi64x:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2298,7 +2298,7 @@ define <2 x i64> @test_mm_set_epi64x(i64
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_epi64x:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %xmm1
 ; X64-NEXT:    movq %rsi, %xmm0
 ; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2310,14 +2310,14 @@ define <2 x i64> @test_mm_set_epi64x(i64
 
 define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
 ; X32-LABEL: test_mm_set_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -2328,13 +2328,13 @@ define <2 x double> @test_mm_set_pd(doub
 
 define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
 ; X32-LABEL: test_mm_set_pd1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_pd1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X64-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
@@ -2344,13 +2344,13 @@ define <2 x double> @test_mm_set_pd1(dou
 
 define <2 x double> @test_mm_set_sd(double %a0) nounwind {
 ; X32-LABEL: test_mm_set_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X64-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
@@ -2360,7 +2360,7 @@ define <2 x double> @test_mm_set_sd(doub
 
 define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
 ; X32-LABEL: test_mm_set1_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd %eax, %xmm0
 ; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2369,7 +2369,7 @@ define <2 x i64> @test_mm_set1_epi8(i8 %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set1_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    movd %eax, %xmm0
 ; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2398,7 +2398,7 @@ define <2 x i64> @test_mm_set1_epi8(i8 %
 
 define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
 ; X32-LABEL: test_mm_set1_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd %eax, %xmm0
 ; X32-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -2406,7 +2406,7 @@ define <2 x i64> @test_mm_set1_epi16(i16
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set1_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %edi, %xmm0
 ; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2425,13 +2425,13 @@ define <2 x i64> @test_mm_set1_epi16(i16
 
 define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
 ; X32-LABEL: test_mm_set1_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set1_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %edi, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
@@ -2447,7 +2447,7 @@ define <2 x i64> @test_mm_set1_epi32(i32
 
 define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
 ; X32-LABEL: test_mm_set1_epi64x:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2455,7 +2455,7 @@ define <2 x i64> @test_mm_set1_epi64x(i6
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set1_epi64x:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X64-NEXT:    retq
@@ -2466,13 +2466,13 @@ define <2 x i64> @test_mm_set1_epi64x(i6
 
 define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
 ; X32-LABEL: test_mm_set1_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_set1_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X64-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
@@ -2482,7 +2482,7 @@ define <2 x double> @test_mm_set1_pd(dou
 
 define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
 ; X32-LABEL: test_mm_setr_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd %eax, %xmm0
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
@@ -2533,7 +2533,7 @@ define <2 x i64> @test_mm_setr_epi8(i8 %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setr_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    movd %eax, %xmm0
 ; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
@@ -2604,7 +2604,7 @@ define <2 x i64> @test_mm_setr_epi8(i8 %
 
 define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
 ; X32-LABEL: test_mm_setr_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movd %eax, %xmm1
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
@@ -2631,7 +2631,7 @@ define <2 x i64> @test_mm_setr_epi16(i16
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setr_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
 ; X64-NEXT:    movd %eax, %xmm0
@@ -2664,7 +2664,7 @@ define <2 x i64> @test_mm_setr_epi16(i16
 
 define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
 ; X32-LABEL: test_mm_setr_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2675,7 +2675,7 @@ define <2 x i64> @test_mm_setr_epi32(i32
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setr_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movd %ecx, %xmm0
 ; X64-NEXT:    movd %edx, %xmm1
 ; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2696,7 +2696,7 @@ define <2 x i64> @test_mm_setr_epi32(i32
 
 define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
 ; X32-LABEL: test_mm_setr_epi64x:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2707,7 +2707,7 @@ define <2 x i64> @test_mm_setr_epi64x(i6
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setr_epi64x:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rsi, %xmm1
 ; X64-NEXT:    movq %rdi, %xmm0
 ; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2719,14 +2719,14 @@ define <2 x i64> @test_mm_setr_epi64x(i6
 
 define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
 ; X32-LABEL: test_mm_setr_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setr_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X64-NEXT:    retq
   %res0  = insertelement <2 x double> undef, double %a0, i32 0
@@ -2736,12 +2736,12 @@ define <2 x double> @test_mm_setr_pd(dou
 
 define <2 x double> @test_mm_setzero_pd() {
 ; X32-LABEL: test_mm_setzero_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setzero_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    retq
   ret <2 x double> zeroinitializer
@@ -2749,12 +2749,12 @@ define <2 x double> @test_mm_setzero_pd(
 
 define <2 x i64> @test_mm_setzero_si128() {
 ; X32-LABEL: test_mm_setzero_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_setzero_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    retq
   ret <2 x i64> zeroinitializer
@@ -2762,12 +2762,12 @@ define <2 x i64> @test_mm_setzero_si128(
 
 define <2 x i64> @test_mm_shuffle_epi32(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_shuffle_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_shuffle_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2778,12 +2778,12 @@ define <2 x i64> @test_mm_shuffle_epi32(
 
 define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_shuffle_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_shuffle_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
@@ -2792,12 +2792,12 @@ define <2 x double> @test_mm_shuffle_pd(
 
 define <2 x i64> @test_mm_shufflehi_epi16(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_shufflehi_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_shufflehi_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2808,12 +2808,12 @@ define <2 x i64> @test_mm_shufflehi_epi1
 
 define <2 x i64> @test_mm_shufflelo_epi16(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_shufflelo_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_shufflelo_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2824,12 +2824,12 @@ define <2 x i64> @test_mm_shufflelo_epi1
 
 define <2 x i64> @test_mm_sll_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_sll_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psllw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sll_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psllw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2842,12 +2842,12 @@ declare <8 x i16> @llvm.x86.sse2.psll.w(
 
 define <2 x i64> @test_mm_sll_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_sll_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pslld %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sll_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pslld %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2860,12 +2860,12 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(
 
 define <2 x i64> @test_mm_sll_epi64(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_sll_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psllq %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sll_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psllq %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -2875,12 +2875,12 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(
 
 define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_slli_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psllw $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_slli_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psllw $1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2892,12 +2892,12 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w
 
 define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_slli_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pslld $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_slli_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pslld $1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2909,12 +2909,12 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d
 
 define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_slli_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psllq $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_slli_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psllq $1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
@@ -2924,12 +2924,12 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q
 
 define <2 x i64> @test_mm_slli_si128(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_slli_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_slli_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -2940,12 +2940,12 @@ define <2 x i64> @test_mm_slli_si128(<2
 
 define <2 x double> @test_mm_sqrt_pd(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_sqrt_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    sqrtpd %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sqrt_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtpd %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
@@ -2955,13 +2955,13 @@ declare <2 x double> @llvm.x86.sse2.sqrt
 
 define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_sqrt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    sqrtsd %xmm0, %xmm1
 ; X32-NEXT:    movapd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sqrt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sqrtsd %xmm0, %xmm1
 ; X64-NEXT:    movapd %xmm1, %xmm0
 ; X64-NEXT:    retq
@@ -2976,12 +2976,12 @@ declare <2 x double> @llvm.x86.sse2.sqrt
 
 define <2 x i64> @test_mm_sra_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_sra_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psraw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sra_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psraw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -2994,12 +2994,12 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(
 
 define <2 x i64> @test_mm_sra_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_sra_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrad %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sra_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrad %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3012,12 +3012,12 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(
 
 define <2 x i64> @test_mm_srai_epi16(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_srai_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psraw $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srai_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psraw $1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3029,12 +3029,12 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w
 
 define <2 x i64> @test_mm_srai_epi32(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_srai_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrad $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srai_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrad $1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3046,12 +3046,12 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d
 
 define <2 x i64> @test_mm_srl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_srl_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrlw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srl_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrlw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3064,12 +3064,12 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(
 
 define <2 x i64> @test_mm_srl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_srl_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrld %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srl_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrld %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3082,12 +3082,12 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(
 
 define <2 x i64> @test_mm_srl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_srl_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrlq %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srl_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrlq %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -3097,12 +3097,12 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(
 
 define <2 x i64> @test_mm_srli_epi16(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_srli_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrlw $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srli_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrlw $1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3114,12 +3114,12 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w
 
 define <2 x i64> @test_mm_srli_epi32(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_srli_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrld $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srli_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrld $1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3131,12 +3131,12 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d
 
 define <2 x i64> @test_mm_srli_epi64(<2 x i64> %a0) {
 ; X32-LABEL: test_mm_srli_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrlq $1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srli_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrlq $1, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 1)
@@ -3146,12 +3146,12 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q
 
 define <2 x i64> @test_mm_srli_si128(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm_srli_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_srli_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3162,13 +3162,13 @@ define <2 x i64> @test_mm_srli_si128(<2
 
 define void @test_mm_store_pd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_store_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
@@ -3178,14 +3178,14 @@ define void @test_mm_store_pd(double *%a
 
 define void @test_mm_store_pd1(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_store_pd1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_pd1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -3197,13 +3197,13 @@ define void @test_mm_store_pd1(double *%
 
 define void @test_mm_store_sd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_store_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movsd %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %ext = extractelement <2 x double> %a1, i32 0
@@ -3213,13 +3213,13 @@ define void @test_mm_store_sd(double *%a
 
 define void @test_mm_store_si128(<2 x i64> *%a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_store_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
   store <2 x i64> %a1, <2 x i64>* %a0, align 16
@@ -3228,14 +3228,14 @@ define void @test_mm_store_si128(<2 x i6
 
 define void @test_mm_store1_pd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_store1_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    movaps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_store1_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X64-NEXT:    movaps %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -3247,14 +3247,14 @@ define void @test_mm_store1_pd(double *%
 
 define void @test_mm_storeh_sd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_storeh_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; X32-NEXT:    movsd %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storeh_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; X64-NEXT:    movsd %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -3265,13 +3265,13 @@ define void @test_mm_storeh_sd(double *%
 
 define void @test_mm_storel_epi64(<2 x i64> *%a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_storel_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movlps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storel_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %xmm0, %rax
 ; X64-NEXT:    movq %rax, (%rdi)
 ; X64-NEXT:    retq
@@ -3283,13 +3283,13 @@ define void @test_mm_storel_epi64(<2 x i
 
 define void @test_mm_storel_sd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_storel_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movsd %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storel_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsd %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %ext = extractelement <2 x double> %a1, i32 0
@@ -3299,14 +3299,14 @@ define void @test_mm_storel_sd(double *%
 
 define void @test_mm_storer_pd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_storer_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
 ; X32-NEXT:    movapd %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storer_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
 ; X64-NEXT:    movapd %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -3318,13 +3318,13 @@ define void @test_mm_storer_pd(double *%
 
 define void @test_mm_storeu_pd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_storeu_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storeu_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movups %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
@@ -3334,13 +3334,13 @@ define void @test_mm_storeu_pd(double *%
 
 define void @test_mm_storeu_si128(<2 x i64> *%a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_storeu_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movups %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_storeu_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movups %xmm0, (%rdi)
 ; X64-NEXT:    retq
   store <2 x i64> %a1, <2 x i64>* %a0, align 1
@@ -3349,13 +3349,13 @@ define void @test_mm_storeu_si128(<2 x i
 
 define void @test_mm_stream_pd(double *%a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_stream_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movntps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_stream_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movntps %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <2 x double>*
@@ -3365,14 +3365,14 @@ define void @test_mm_stream_pd(double *%
 
 define void @test_mm_stream_si32(i32 *%a0, i32 %a1) {
 ; X32-LABEL: test_mm_stream_si32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movntil %eax, (%ecx)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_stream_si32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movntil %esi, (%rdi)
 ; X64-NEXT:    retq
   store i32 %a1, i32* %a0, align 1, !nontemporal !0
@@ -3381,13 +3381,13 @@ define void @test_mm_stream_si32(i32 *%a
 
 define void @test_mm_stream_si128(<2 x i64> *%a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_stream_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movntps %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_stream_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movntps %xmm0, (%rdi)
 ; X64-NEXT:    retq
   store <2 x i64> %a1, <2 x i64>* %a0, align 16, !nontemporal !0
@@ -3396,12 +3396,12 @@ define void @test_mm_stream_si128(<2 x i
 
 define <2 x i64> @test_mm_sub_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3413,12 +3413,12 @@ define <2 x i64> @test_mm_sub_epi8(<2 x
 
 define <2 x i64> @test_mm_sub_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3430,12 +3430,12 @@ define <2 x i64> @test_mm_sub_epi16(<2 x
 
 define <2 x i64> @test_mm_sub_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3447,12 +3447,12 @@ define <2 x i64> @test_mm_sub_epi32(<2 x
 
 define <2 x i64> @test_mm_sub_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubq %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubq %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = sub <2 x i64> %a0, %a1
@@ -3461,12 +3461,12 @@ define <2 x i64> @test_mm_sub_epi64(<2 x
 
 define <2 x double> @test_mm_sub_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subpd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subpd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = fsub <2 x double> %a0, %a1
@@ -3475,12 +3475,12 @@ define <2 x double> @test_mm_sub_pd(<2 x
 
 define <2 x double> @test_mm_sub_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_sub_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subsd %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_sub_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subsd %xmm1, %xmm0
 ; X64-NEXT:    retq
   %ext0 = extractelement <2 x double> %a0, i32 0
@@ -3492,12 +3492,12 @@ define <2 x double> @test_mm_sub_sd(<2 x
 
 define <2 x i64> @test_mm_subs_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_subs_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubsb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_subs_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubsb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3510,12 +3510,12 @@ declare <16 x i8> @llvm.x86.sse2.psubs.b
 
 define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_subs_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubsw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_subs_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubsw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3528,12 +3528,12 @@ declare <8 x i16> @llvm.x86.sse2.psubs.w
 
 define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_subs_epu8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubusb %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_subs_epu8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubusb %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3546,12 +3546,12 @@ declare <16 x i8> @llvm.x86.sse2.psubus.
 
 define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_subs_epu16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    psubusw %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_subs_epu16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    psubusw %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3564,7 +3564,7 @@ declare <8 x i16> @llvm.x86.sse2.psubus.
 
 define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomieq_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    ucomisd %xmm1, %xmm0
 ; X32-NEXT:    setnp %al
 ; X32-NEXT:    sete %cl
@@ -3573,7 +3573,7 @@ define i32 @test_mm_ucomieq_sd(<2 x doub
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomieq_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    ucomisd %xmm1, %xmm0
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    sete %cl
@@ -3587,14 +3587,14 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2
 
 define i32 @test_mm_ucomige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomige_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomisd %xmm1, %xmm0
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomige_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomisd %xmm1, %xmm0
 ; X64-NEXT:    setae %al
@@ -3606,14 +3606,14 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2
 
 define i32 @test_mm_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomigt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomisd %xmm1, %xmm0
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomigt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomisd %xmm1, %xmm0
 ; X64-NEXT:    seta %al
@@ -3625,14 +3625,14 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2
 
 define i32 @test_mm_ucomile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomile_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomisd %xmm0, %xmm1
 ; X32-NEXT:    setae %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomile_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomisd %xmm0, %xmm1
 ; X64-NEXT:    setae %al
@@ -3644,14 +3644,14 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2
 
 define i32 @test_mm_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomilt_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    ucomisd %xmm0, %xmm1
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomilt_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    ucomisd %xmm0, %xmm1
 ; X64-NEXT:    seta %al
@@ -3663,7 +3663,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2
 
 define i32 @test_mm_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_ucomineq_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    ucomisd %xmm1, %xmm0
 ; X32-NEXT:    setp %al
 ; X32-NEXT:    setne %cl
@@ -3672,7 +3672,7 @@ define i32 @test_mm_ucomineq_sd(<2 x dou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_ucomineq_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    ucomisd %xmm1, %xmm0
 ; X64-NEXT:    setp %al
 ; X64-NEXT:    setne %cl
@@ -3686,34 +3686,34 @@ declare i32 @llvm.x86.sse2.ucomineq.sd(<
 
 define <2 x double> @test_mm_undefined_pd() {
 ; X32-LABEL: test_mm_undefined_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_undefined_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <2 x double> undef
 }
 
 define <2 x i64> @test_mm_undefined_si128() {
 ; X32-LABEL: test_mm_undefined_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_undefined_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <2 x i64> undef
 }
 
 define <2 x i64> @test_mm_unpackhi_epi8(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpackhi_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpackhi_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3725,12 +3725,12 @@ define <2 x i64> @test_mm_unpackhi_epi8(
 
 define <2 x i64> @test_mm_unpackhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpackhi_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpackhi_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3742,12 +3742,12 @@ define <2 x i64> @test_mm_unpackhi_epi16
 
 define <2 x i64> @test_mm_unpackhi_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpackhi_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpackhi_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3759,12 +3759,12 @@ define <2 x i64> @test_mm_unpackhi_epi32
 
 define <2 x i64> @test_mm_unpackhi_epi64(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpackhi_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpackhi_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
@@ -3773,12 +3773,12 @@ define <2 x i64> @test_mm_unpackhi_epi64
 
 define <2 x double> @test_mm_unpackhi_pd(<2 x double> %a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_unpackhi_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpackhi_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
@@ -3787,12 +3787,12 @@ define <2 x double> @test_mm_unpackhi_pd
 
 define <2 x i64> @test_mm_unpacklo_epi8(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpacklo_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpacklo_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -3804,12 +3804,12 @@ define <2 x i64> @test_mm_unpacklo_epi8(
 
 define <2 x i64> @test_mm_unpacklo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpacklo_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpacklo_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -3821,12 +3821,12 @@ define <2 x i64> @test_mm_unpacklo_epi16
 
 define <2 x i64> @test_mm_unpacklo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpacklo_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpacklo_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -3838,12 +3838,12 @@ define <2 x i64> @test_mm_unpacklo_epi32
 
 define <2 x i64> @test_mm_unpacklo_epi64(<2 x i64> %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm_unpacklo_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpacklo_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
@@ -3852,12 +3852,12 @@ define <2 x i64> @test_mm_unpacklo_epi64
 
 define <2 x double> @test_mm_unpacklo_pd(<2 x double> %a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm_unpacklo_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_unpacklo_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
@@ -3866,12 +3866,12 @@ define <2 x double> @test_mm_unpacklo_pd
 
 define <2 x double> @test_mm_xor_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_xor_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_xor_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x double> %a0 to <4 x i32>
@@ -3883,12 +3883,12 @@ define <2 x double> @test_mm_xor_pd(<2 x
 
 define <2 x i64> @test_mm_xor_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_xor_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_xor_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm1, %xmm0
 ; X64-NEXT:    retq
   %res = xor <2 x i64> %a0, %a1

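(The hunks above are purely mechanical: every old-style basic-block comment of the form "# BB#0:" — or "## BB#0:" in the Darwin-style checks — becomes "# %bb.0:". As a minimal sketch of how such CHECK lines could be rewritten in bulk — assuming a plain regex rewrite is acceptable and the tests are not simply regenerated with utils/update_llc_test_checks.py — this hypothetical Python helper is illustrative only and not part of the commit:

import re
import sys

# Match old-style basic-block comments ("# BB#N:" / "## BB#N:"),
# capturing the comment leader and the block number.
BB_REF = re.compile(r"(#+) BB#(\d+):")

def unify_mbb_refs(path):
    with open(path) as f:
        text = f.read()
    # \1 preserves the original comment leader ("#" or "##");
    # \2 keeps the block number, now in the unified "%bb.N" form.
    with open(path, "w") as f:
        f.write(BB_REF.sub(r"\1 %bb.\2:", text))

if __name__ == "__main__":
    for test_file in sys.argv[1:]:
        unify_mbb_refs(test_file)

Run against a test file, this turns a check like "; X32: # BB#0:" into "; X32: # %bb.0:", matching the diffs shown here.)
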
Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
 ; CHECK-LABEL: test_x86_sse2_psll_dq_bs:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -14,7 +14,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
 ; CHECK-LABEL: test_x86_sse2_psrl_dq_bs:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -24,7 +24,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq
 
 define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
 ; CHECK-LABEL: test_x86_sse2_psll_dq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -35,7 +35,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
 ; CHECK-LABEL: test_x86_sse2_psrl_dq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -46,7 +46,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq
 
 define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
@@ -57,7 +57,7 @@ declare <2 x double> @llvm.x86.sse2.cvtd
 
 define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvtps2pd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    cvtps2pd %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
@@ -68,7 +68,7 @@ declare <2 x double> @llvm.x86.sse2.cvtp
 
 define void @test_x86_sse2_storel_dq(i8* %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_sse2_storel_dq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movlps %xmm0, (%eax)
 ; CHECK-NEXT:    retl
@@ -81,7 +81,7 @@ declare void @llvm.x86.sse2.storel.dq(i8
 define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
   ; add operation forces the execution domain.
 ; CHECK-LABEL: test_x86_sse2_storeu_dq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-NEXT:    psubb %xmm1, %xmm0
@@ -97,7 +97,7 @@ declare void @llvm.x86.sse2.storeu.dq(i8
 define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
   ; fadd operation forces the execution domain.
 ; CHECK-LABEL: test_x86_sse2_storeu_pd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorpd %xmm1, %xmm1
 ; CHECK-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
@@ -112,7 +112,7 @@ declare void @llvm.x86.sse2.storeu.pd(i8
 
 define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
 ; CHECK-LABEL: test_x86_sse2_pshuf_d:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; CHECK-NEXT:    retl
 entry:
@@ -123,7 +123,7 @@ declare <4 x i32> @llvm.x86.sse2.pshuf.d
 
 define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
 ; CHECK-LABEL: test_x86_sse2_pshufl_w:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
 ; CHECK-NEXT:    retl
 entry:
@@ -134,7 +134,7 @@ declare <8 x i16> @llvm.x86.sse2.pshufl.
 
 define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
 ; CHECK-LABEL: test_x86_sse2_pshufh_w:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
 ; CHECK-NEXT:    retl
 entry:
@@ -145,7 +145,7 @@ declare <8 x i16> @llvm.x86.sse2.pshufh.
 
 define <16 x i8> @max_epu8(<16 x i8> %a0, <16 x i8> %a1) {
 ; CHECK-LABEL: max_epu8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pmaxub %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
@@ -155,7 +155,7 @@ declare <16 x i8> @llvm.x86.sse2.pmaxu.b
 
 define <16 x i8> @min_epu8(<16 x i8> %a0, <16 x i8> %a1) {
 ; CHECK-LABEL: min_epu8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pminub %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
@@ -165,7 +165,7 @@ declare <16 x i8> @llvm.x86.sse2.pminu.b
 
 define <8 x i16> @max_epi16(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: max_epi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pmaxsw %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
@@ -175,7 +175,7 @@ declare <8 x i16> @llvm.x86.sse2.pmaxs.w
 
 define <8 x i16> @min_epi16(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: min_epi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pminsw %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
@@ -185,21 +185,21 @@ declare <8 x i16> @llvm.x86.sse2.pmins.w
 
 define <2 x double> @test_x86_sse2_add_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_add_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    addsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x58,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_add_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x58,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_add_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x58,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse2_add_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    addsd %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -210,21 +210,21 @@ declare <2 x double> @llvm.x86.sse2.add.
 
 define <2 x double> @test_x86_sse2_sub_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_sub_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    subsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5c,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_sub_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5c,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_sub_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x5c,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse2_sub_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subsd %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -235,21 +235,21 @@ declare <2 x double> @llvm.x86.sse2.sub.
 
 define <2 x double> @test_x86_sse2_mul_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_mul_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    mulsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x59,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_mul_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x59,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_mul_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x59,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse2_mul_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    mulsd %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -260,21 +260,21 @@ declare <2 x double> @llvm.x86.sse2.mul.
 
 define <2 x double> @test_x86_sse2_div_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_div_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5e,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_div_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5e,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_div_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x5e,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
 ; CHECK-LABEL: test_x86_sse2_div_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    divsd %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -284,7 +284,7 @@ declare <2 x double> @llvm.x86.sse2.div.
 
 define <16 x i8> @mm_avg_epu8(<16 x i8> %a0, <16 x i8> %a1) {
 ; CHECK-LABEL: mm_avg_epu8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pavgb %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -294,7 +294,7 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(
 
 define <8 x i16> @mm_avg_epu16(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: mm_avg_epu16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pavgw %xmm1, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 
 define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_cmp_pd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpordpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xc2,0xc1,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cmp_pd:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcmpordpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -21,12 +21,12 @@ declare <2 x double> @llvm.x86.sse2.cmp.
 
 define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_cmp_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cmpordsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0xc2,0xc1,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cmp_sd:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -37,7 +37,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.
 
 define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_comieq_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SSE-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -46,7 +46,7 @@ define i32 @test_x86_sse2_comieq_sd(<2 x
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_comieq_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
 ; AVX2-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; AVX2-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -55,7 +55,7 @@ define i32 @test_x86_sse2_comieq_sd(<2 x
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_comieq_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
 ; SKX-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SKX-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -70,21 +70,21 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2
 
 define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_comige_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_comige_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_comige_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -97,21 +97,21 @@ declare i32 @llvm.x86.sse2.comige.sd(<2
 
 define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_comigt_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_comigt_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_comigt_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -124,21 +124,21 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2
 
 define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_comile_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2f,0xc8]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_comile_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_comile_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -151,21 +151,21 @@ declare i32 @llvm.x86.sse2.comile.sd(<2
 
 define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_comilt_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    comisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2f,0xc8]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_comilt_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_comilt_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -178,7 +178,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2
 
 define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_comineq_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    comisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2f,0xc1]
 ; SSE-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SSE-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -187,7 +187,7 @@ define i32 @test_x86_sse2_comineq_sd(<2
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_comineq_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
 ; AVX2-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; AVX2-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -196,7 +196,7 @@ define i32 @test_x86_sse2_comineq_sd(<2
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_comineq_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
 ; SKX-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SKX-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -211,17 +211,17 @@ declare i32 @llvm.x86.sse2.comineq.sd(<2
 
 define <4 x float> @test_x86_sse2_cvtdq2ps(<4 x i32> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvtdq2ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0 ## encoding: [0x0f,0x5b,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtdq2ps:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtdq2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5b,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtdq2ps:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtdq2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %a0) ; <<4 x float>> [#uses=1]
@@ -232,17 +232,17 @@ declare <4 x float> @llvm.x86.sse2.cvtdq
 
 define <4 x i32> @test_x86_sse2_cvtpd2dq(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvtpd2dq:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2dq %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0xe6,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtpd2dq:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtpd2dq:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -253,17 +253,17 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2d
 
 define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind {
 ; SSE-LABEL: test_mm_cvtpd_epi32_zext:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2dq %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0xe6,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_mm_cvtpd_epi32_zext:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_mm_cvtpd_epi32_zext:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %cvt = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
@@ -275,19 +275,19 @@ define <2 x i64> @test_mm_cvtpd_epi32_ze
 
 define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind {
 ; SSE-LABEL: test_mm_cvtpd_epi32_zext_load:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvtpd2dq (%eax), %xmm0 ## encoding: [0xf2,0x0f,0xe6,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_mm_cvtpd_epi32_zext_load:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; AVX2-NEXT:    vcvtpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xfb,0xe6,0x00]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_mm_cvtpd_epi32_zext_load:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SKX-NEXT:    vcvtpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0x00]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
@@ -301,17 +301,17 @@ define <2 x i64> @test_mm_cvtpd_epi32_ze
 
 define <4 x float> @test_x86_sse2_cvtpd2ps(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvtpd2ps:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5a,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtpd2ps:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtpd2ps:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) ; <<4 x float>> [#uses=1]
@@ -321,17 +321,17 @@ declare <4 x float> @llvm.x86.sse2.cvtpd
 
 define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind {
 ; SSE-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5a,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtpd2ps_zext:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %cvt = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
@@ -341,19 +341,19 @@ define <4 x float> @test_x86_sse2_cvtpd2
 
 define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind {
 ; SSE-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvtpd2ps (%eax), %xmm0 ## encoding: [0x66,0x0f,0x5a,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; AVX2-NEXT:    vcvtpd2psx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x5a,0x00]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtpd2ps_zext_load:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SKX-NEXT:    vcvtpd2psx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0x00]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
@@ -365,12 +365,12 @@ define <4 x float> @test_x86_sse2_cvtpd2
 
 define <4 x i32> @test_x86_sse2_cvtps2dq(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvtps2dq:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtps2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x5b,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtps2dq:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcvtps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5b,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
@@ -381,17 +381,17 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2d
 
 define i32 @test_x86_sse2_cvtsd2si(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvtsd2si:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsd2si %xmm0, %eax ## encoding: [0xf2,0x0f,0x2d,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtsd2si:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2d,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtsd2si:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0) ; <i32> [#uses=1]
@@ -402,12 +402,12 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x
 
 define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_cvtsd2ss:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsd2ss %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5a,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtsd2ss:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0xc1]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
@@ -418,13 +418,13 @@ declare <4 x float> @llvm.x86.sse2.cvtsd
 
 define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %p1) {
 ; SSE-LABEL: test_x86_sse2_cvtsd2ss_load:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtsd2ss_load:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; VCHECK-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
@@ -436,13 +436,13 @@ define <4 x float> @test_x86_sse2_cvtsd2
 
 define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x double>* %p1) optsize {
 ; SSE-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvtsd2ss (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x5a,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtsd2ss_load_optsize:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; VCHECK-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0x00]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
@@ -454,17 +454,17 @@ define <4 x float> @test_x86_sse2_cvtsd2
 
 define <2 x double> @test_x86_sse2_cvtsi2sd(<2 x double> %a0, i32 %a1) {
 ; SSE-LABEL: test_x86_sse2_cvtsi2sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xf2,0x0f,0x2a,0x44,0x24,0x04]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtsi2sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtsi2sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1) ; <<2 x double>> [#uses=1]
@@ -475,12 +475,12 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 define <2 x double> @test_x86_sse2_cvtss2sd(<2 x double> %a0, <4 x float> %a1) {
 ; SSE-LABEL: test_x86_sse2_cvtss2sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtss2sd %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5a,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtss2sd:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0xc1]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
@@ -491,13 +491,13 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, <4 x float>* %p1) {
 ; SSE-LABEL: test_x86_sse2_cvtss2sd_load:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvtss2sd (%eax), %xmm0 ## encoding: [0xf3,0x0f,0x5a,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtss2sd_load:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; VCHECK-NEXT:    vcvtss2sd (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0x00]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
@@ -509,13 +509,13 @@ define <2 x double> @test_x86_sse2_cvtss
 
 define <2 x double> @test_x86_sse2_cvtss2sd_load_optsize(<2 x double> %a0, <4 x float>* %p1) optsize {
 ; SSE-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvtss2sd (%eax), %xmm0 ## encoding: [0xf3,0x0f,0x5a,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; VCHECK-NEXT:    vcvtss2sd (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0x00]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
@@ -527,17 +527,17 @@ define <2 x double> @test_x86_sse2_cvtss
 
 define <4 x i32> @test_x86_sse2_cvttpd2dq(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvttpd2dq:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttpd2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xe6,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvttpd2dq:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvttpd2dq:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -548,17 +548,17 @@ declare <4 x i32> @llvm.x86.sse2.cvttpd2
 
 define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind {
 ; SSE-LABEL: test_mm_cvttpd_epi32_zext:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttpd2dq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0xe6,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_mm_cvttpd_epi32_zext:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_mm_cvttpd_epi32_zext:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %cvt = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
@@ -570,19 +570,19 @@ define <2 x i64> @test_mm_cvttpd_epi32_z
 
 define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind {
 ; SSE-LABEL: test_mm_cvttpd_epi32_zext_load:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    cvttpd2dq (%eax), %xmm0 ## encoding: [0x66,0x0f,0xe6,0x00]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_mm_cvttpd_epi32_zext_load:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; AVX2-NEXT:    vcvttpd2dqx (%eax), %xmm0 ## encoding: [0xc5,0xf9,0xe6,0x00]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_mm_cvttpd_epi32_zext_load:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SKX-NEXT:    vcvttpd2dqx (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0x00]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
@@ -596,17 +596,17 @@ define <2 x i64> @test_mm_cvttpd_epi32_z
 
 define <4 x i32> @test_x86_sse2_cvttps2dq(<4 x float> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvttps2dq:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttps2dq %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x5b,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvttps2dq:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5b,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvttps2dq:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
@@ -617,17 +617,17 @@ declare <4 x i32> @llvm.x86.sse2.cvttps2
 
 define i32 @test_x86_sse2_cvttsd2si(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_cvttsd2si:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttsd2si %xmm0, %eax ## encoding: [0xf2,0x0f,0x2c,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvttsd2si:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2c,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvttsd2si:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0) ; <i32> [#uses=1]
@@ -638,17 +638,17 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2
 
 define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_max_pd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    maxpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x5f,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_max_pd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5f,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_max_pd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -659,17 +659,17 @@ declare <2 x double> @llvm.x86.sse2.max.
 
 define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_max_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    maxsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5f,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_max_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_max_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -680,17 +680,17 @@ declare <2 x double> @llvm.x86.sse2.max.
 
 define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_min_pd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    minpd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x5d,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_min_pd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vminpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5d,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_min_pd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vminpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -701,17 +701,17 @@ declare <2 x double> @llvm.x86.sse2.min.
 
 define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_min_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    minsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5d,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_min_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_min_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -722,12 +722,12 @@ declare <2 x double> @llvm.x86.sse2.min.
 
 define i32 @test_x86_sse2_movmsk_pd(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_movmsk_pd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movmskpd %xmm0, %eax ## encoding: [0x66,0x0f,0x50,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_movmsk_pd:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vmovmskpd %xmm0, %eax ## encoding: [0xc5,0xf9,0x50,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) ; <i32> [#uses=1]
@@ -738,17 +738,17 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2
 
 define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: test_x86_sse2_packssdw_128:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    packssdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6b,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_packssdw_128:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x6b,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_packssdw_128:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
@@ -759,21 +759,21 @@ declare <8 x i16> @llvm.x86.sse2.packssd
 
 define <8 x i16> @test_x86_sse2_packssdw_128_fold() {
 ; SSE-LABEL: test_x86_sse2_packssdw_128_fold:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
 ; SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
 ; SSE-NEXT:    ## fixup A - offset: 3, value: LCPI35_0, kind: FK_Data_4
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_packssdw_128_fold:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
 ; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
 ; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI35_0, kind: FK_Data_4
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_packssdw_128_fold:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmovaps LCPI35_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
 ; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
 ; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI35_0, kind: FK_Data_4
@@ -785,17 +785,17 @@ define <8 x i16> @test_x86_sse2_packssdw
 
 define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_packsswb_128:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    packsswb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x63,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_packsswb_128:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_packsswb_128:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
@@ -806,21 +806,21 @@ declare <16 x i8> @llvm.x86.sse2.packssw
 
 define <16 x i8> @test_x86_sse2_packsswb_128_fold() {
 ; SSE-LABEL: test_x86_sse2_packsswb_128_fold:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
 ; SSE-NEXT:    ## fixup A - offset: 3, value: LCPI37_0, kind: FK_Data_4
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_packsswb_128_fold:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
 ; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI37_0, kind: FK_Data_4
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_packsswb_128_fold:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmovaps LCPI37_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
 ; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI37_0, kind: FK_Data_4
@@ -832,17 +832,17 @@ define <16 x i8> @test_x86_sse2_packsswb
 
 define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_packuswb_128:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    packuswb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x67,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_packuswb_128:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x67,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_packuswb_128:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
@@ -853,21 +853,21 @@ declare <16 x i8> @llvm.x86.sse2.packusw
 
 define <16 x i8> @test_x86_sse2_packuswb_128_fold() {
 ; SSE-LABEL: test_x86_sse2_packuswb_128_fold:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; SSE-NEXT:    ## encoding: [0x0f,0x28,0x05,A,A,A,A]
 ; SSE-NEXT:    ## fixup A - offset: 3, value: LCPI39_0, kind: FK_Data_4
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_packuswb_128_fold:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX2-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
 ; AVX2-NEXT:    ## fixup A - offset: 4, value: LCPI39_0, kind: FK_Data_4
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_packuswb_128_fold:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmovaps LCPI39_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; SKX-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
 ; SKX-NEXT:    ## fixup A - offset: 4, value: LCPI39_0, kind: FK_Data_4
@@ -879,17 +879,17 @@ define <16 x i8> @test_x86_sse2_packuswb
 
 define <16 x i8> @test_x86_sse2_padds_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_padds_b:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xec,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_padds_b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_padds_b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -900,17 +900,17 @@ declare <16 x i8> @llvm.x86.sse2.padds.b
 
 define <8 x i16> @test_x86_sse2_padds_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_padds_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xed,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_padds_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_padds_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -921,17 +921,17 @@ declare <8 x i16> @llvm.x86.sse2.padds.w
 
 define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_paddus_b:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_paddus_b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_paddus_b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -942,17 +942,17 @@ declare <16 x i8> @llvm.x86.sse2.paddus.
 
 define <8 x i16> @test_x86_sse2_paddus_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_paddus_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_paddus_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_paddus_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -963,17 +963,17 @@ declare <8 x i16> @llvm.x86.sse2.paddus.
 
 define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmadd_wd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmaddwd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf5,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmadd_wd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf5,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmadd_wd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) ; <<4 x i32>> [#uses=1]
@@ -984,17 +984,17 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.w
 
 define <8 x i16> @test_x86_sse2_pmaxs_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmaxs_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmaxsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xee,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmaxs_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmaxs_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1005,17 +1005,17 @@ declare <8 x i16> @llvm.x86.sse2.pmaxs.w
 
 define <16 x i8> @test_x86_sse2_pmaxu_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmaxu_b:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmaxub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xde,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmaxu_b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmaxu_b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1026,17 +1026,17 @@ declare <16 x i8> @llvm.x86.sse2.pmaxu.b
 
 define <8 x i16> @test_x86_sse2_pmins_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmins_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pminsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xea,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmins_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmins_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1047,17 +1047,17 @@ declare <8 x i16> @llvm.x86.sse2.pmins.w
 
 define <16 x i8> @test_x86_sse2_pminu_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_pminu_b:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pminub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xda,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pminu_b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pminu_b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1068,12 +1068,12 @@ declare <16 x i8> @llvm.x86.sse2.pminu.b
 
 define i32 @test_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
 ; SSE-LABEL: test_x86_sse2_pmovmskb_128:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmovmskb %xmm0, %eax ## encoding: [0x66,0x0f,0xd7,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; VCHECK-LABEL: test_x86_sse2_pmovmskb_128:
-; VCHECK:       ## BB#0:
+; VCHECK:       ## %bb.0:
 ; VCHECK-NEXT:    vpmovmskb %xmm0, %eax ## encoding: [0xc5,0xf9,0xd7,0xc0]
 ; VCHECK-NEXT:    retl ## encoding: [0xc3]
   %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) ; <i32> [#uses=1]
@@ -1084,17 +1084,17 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(
 
 define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmulh_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmulhw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe5,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmulh_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe5,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmulh_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1105,17 +1105,17 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w
 
 define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmulhu_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmulhuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe4,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmulhu_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe4,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmulhu_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1126,17 +1126,17 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.
 
 define <2 x i64> @test_x86_sse2_pmulu_dq(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: test_x86_sse2_pmulu_dq:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pmuludq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf4,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pmulu_dq:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf4,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pmulu_dq:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
@@ -1147,17 +1147,17 @@ declare <2 x i64> @llvm.x86.sse2.pmulu.d
 
 define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_psad_bw:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psadbw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf6,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psad_bw:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf6,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psad_bw:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) ; <<2 x i64>> [#uses=1]
@@ -1168,17 +1168,17 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw
 
 define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: test_x86_sse2_psll_d:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pslld %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf2,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psll_d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpslld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf2,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psll_d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpslld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1189,17 +1189,17 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(
 
 define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_x86_sse2_psll_q:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf3,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psll_q:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf3,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psll_q:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1210,17 +1210,17 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(
 
 define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_psll_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf1,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psll_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf1,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psll_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1231,17 +1231,17 @@ declare <8 x i16> @llvm.x86.sse2.psll.w(
 
 define <4 x i32> @test_x86_sse2_pslli_d(<4 x i32> %a0) {
 ; SSE-LABEL: test_x86_sse2_pslli_d:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    pslld $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xf0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pslli_d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpslld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xf0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pslli_d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpslld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1252,17 +1252,17 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d
 
 define <2 x i64> @test_x86_sse2_pslli_q(<2 x i64> %a0) {
 ; SSE-LABEL: test_x86_sse2_pslli_q:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pslli_q:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsllq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pslli_q:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -1273,17 +1273,17 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q
 
 define <8 x i16> @test_x86_sse2_pslli_w(<8 x i16> %a0) {
 ; SSE-LABEL: test_x86_sse2_pslli_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psllw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xf0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_pslli_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_pslli_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1294,17 +1294,17 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w
 
 define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: test_x86_sse2_psra_d:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrad %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe2,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psra_d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe2,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psra_d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1315,17 +1315,17 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(
 
 define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_psra_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psraw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe1,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psra_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe1,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psra_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1336,17 +1336,17 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(
 
 define <4 x i32> @test_x86_sse2_psrai_d(<4 x i32> %a0) {
 ; SSE-LABEL: test_x86_sse2_psrai_d:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrad $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xe0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrai_d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrad $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xe0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrai_d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrad $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1357,17 +1357,17 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d
 
 define <8 x i16> @test_x86_sse2_psrai_w(<8 x i16> %a0) {
 ; SSE-LABEL: test_x86_sse2_psrai_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psraw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xe0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrai_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsraw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrai_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsraw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1378,17 +1378,17 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w
 
 define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE-LABEL: test_x86_sse2_psrl_d:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrld %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd2,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrl_d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd2,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrl_d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1399,17 +1399,17 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(
 
 define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_x86_sse2_psrl_q:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd3,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrl_q:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd3,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrl_q:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1420,17 +1420,17 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(
 
 define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_psrl_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd1,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrl_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrl_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1441,17 +1441,17 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(
 
 define <4 x i32> @test_x86_sse2_psrli_d(<4 x i32> %a0) {
 ; SSE-LABEL: test_x86_sse2_psrli_d:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrld $7, %xmm0 ## encoding: [0x66,0x0f,0x72,0xd0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrli_d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xd0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrli_d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1462,17 +1462,17 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d
 
 define <2 x i64> @test_x86_sse2_psrli_q(<2 x i64> %a0) {
 ; SSE-LABEL: test_x86_sse2_psrli_q:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrli_q:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrlq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrli_q:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrlq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -1483,17 +1483,17 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q
 
 define <8 x i16> @test_x86_sse2_psrli_w(<8 x i16> %a0) {
 ; SSE-LABEL: test_x86_sse2_psrli_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psrlw $7, %xmm0 ## encoding: [0x66,0x0f,0x71,0xd0,0x07]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psrli_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xd0,0x07]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psrli_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsrlw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x07]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1504,17 +1504,17 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w
 
 define <16 x i8> @test_x86_sse2_psubs_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_psubs_b:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe8,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psubs_b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psubs_b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1525,17 +1525,17 @@ declare <16 x i8> @llvm.x86.sse2.psubs.b
 
 define <8 x i16> @test_x86_sse2_psubs_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_psubs_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe9,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psubs_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psubs_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1546,17 +1546,17 @@ declare <8 x i16> @llvm.x86.sse2.psubs.w
 
 define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE-LABEL: test_x86_sse2_psubus_b:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psubus_b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psubus_b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -1567,17 +1567,17 @@ declare <16 x i8> @llvm.x86.sse2.psubus.
 
 define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE-LABEL: test_x86_sse2_psubus_w:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_psubus_w:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_psubus_w:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1588,17 +1588,17 @@ declare <8 x i16> @llvm.x86.sse2.psubus.
 
 define <2 x double> @test_x86_sse2_sqrt_pd(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_sqrt_pd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    sqrtpd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x51,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_sqrt_pd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vsqrtpd %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x51,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_sqrt_pd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vsqrtpd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
@@ -1609,17 +1609,17 @@ declare <2 x double> @llvm.x86.sse2.sqrt
 
 define <2 x double> @test_x86_sse2_sqrt_sd(<2 x double> %a0) {
 ; SSE-LABEL: test_x86_sse2_sqrt_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_sqrt_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_sqrt_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
 ; SKX-NEXT:    retl ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
@@ -1630,21 +1630,21 @@ declare <2 x double> @llvm.x86.sse2.sqrt
 
 define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
 ; SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SSE-NEXT:    movapd (%eax), %xmm0 ## encoding: [0x66,0x0f,0x28,0x00]
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; AVX2-NEXT:    vmovapd (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x00]
 ; AVX2-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_sqrt_sd_vec_load:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; SKX-NEXT:    vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
 ; SKX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
@@ -1657,7 +1657,7 @@ define <2 x double> @test_x86_sse2_sqrt_
 
 define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomieq_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SSE-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -1666,7 +1666,7 @@ define i32 @test_x86_sse2_ucomieq_sd(<2
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_ucomieq_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
 ; AVX2-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; AVX2-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -1675,7 +1675,7 @@ define i32 @test_x86_sse2_ucomieq_sd(<2
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_ucomieq_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
 ; SKX-NEXT:    setnp %al ## encoding: [0x0f,0x9b,0xc0]
 ; SKX-NEXT:    sete %cl ## encoding: [0x0f,0x94,0xc1]
@@ -1690,21 +1690,21 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2
 
 define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomige_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_ucomige_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_ucomige_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -1717,21 +1717,21 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2
 
 define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomigt_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_ucomigt_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_ucomigt_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -1744,21 +1744,21 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2
 
 define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomile_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2e,0xc8]
 ; SSE-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_ucomile_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
 ; AVX2-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_ucomile_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
 ; SKX-NEXT:    setae %al ## encoding: [0x0f,0x93,0xc0]
@@ -1771,21 +1771,21 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2
 
 define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomilt_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SSE-NEXT:    ucomisd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x2e,0xc8]
 ; SSE-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_ucomilt_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; AVX2-NEXT:    vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
 ; AVX2-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_ucomilt_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; SKX-NEXT:    vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
 ; SKX-NEXT:    seta %al ## encoding: [0x0f,0x97,0xc0]
@@ -1798,7 +1798,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2
 
 define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_x86_sse2_ucomineq_sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    ucomisd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x2e,0xc1]
 ; SSE-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SSE-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -1807,7 +1807,7 @@ define i32 @test_x86_sse2_ucomineq_sd(<2
 ; SSE-NEXT:    retl ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_ucomineq_sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
 ; AVX2-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; AVX2-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -1816,7 +1816,7 @@ define i32 @test_x86_sse2_ucomineq_sd(<2
 ; AVX2-NEXT:    retl ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_ucomineq_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
 ; SKX-NEXT:    setp %al ## encoding: [0x0f,0x9a,0xc0]
 ; SKX-NEXT:    setne %cl ## encoding: [0x0f,0x95,0xc1]
@@ -1830,7 +1830,7 @@ declare i32 @llvm.x86.sse2.ucomineq.sd(<
 
 define void @test_x86_sse2_pause() {
 ; CHECK-LABEL: test_x86_sse2_pause:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pause ## encoding: [0xf3,0x90]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.pause()
@@ -1840,7 +1840,7 @@ declare void @llvm.x86.sse2.pause() noun
 
 define void @lfence() nounwind {
 ; CHECK-LABEL: lfence:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    lfence ## encoding: [0x0f,0xae,0xe8]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.lfence()
@@ -1850,7 +1850,7 @@ declare void @llvm.x86.sse2.lfence() nou
 
 define void @mfence() nounwind {
 ; CHECK-LABEL: mfence:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    mfence ## encoding: [0x0f,0xae,0xf0]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   tail call void @llvm.x86.sse2.mfence()
@@ -1860,7 +1860,7 @@ declare void @llvm.x86.sse2.mfence() nou
 
 define void @clflush(i8* %p) nounwind {
 ; CHECK-LABEL: clflush:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; CHECK-NEXT:    clflush (%eax) ## encoding: [0x0f,0xae,0x38]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -5,21 +5,21 @@
 
 define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
 ; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse2_cvtsd2si64:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2d,0xc0]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtsd2si64:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
 ; AVX2-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtsd2si64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
 ; SKX-NEXT:    retq ## encoding: [0xc3]
   %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
@@ -30,21 +30,21 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2
 
 define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
 ; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse2_cvtsi642sd:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0 ## encoding: [0xf2,0x48,0x0f,0x2a,0xc7]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvtsi642sd:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
 ; AVX2-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvtsi642sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
 ; SKX-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
@@ -55,21 +55,21 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvttsd2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttsd2si %xmm0, %rax
 ; CHECK-NEXT:    retq
 ; SSE-LABEL: test_x86_sse2_cvttsd2si64:
-; SSE:       ## BB#0:
+; SSE:       ## %bb.0:
 ; SSE-NEXT:    cvttsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2c,0xc0]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX2-LABEL: test_x86_sse2_cvttsd2si64:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vcvttsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
 ; AVX2-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: test_x86_sse2_cvttsd2si64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vcvttsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
 ; SKX-NEXT:    retq ## encoding: [0xc3]
   %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
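
As the hunks above show, every change in these tests is the same mechanical rewrite: the old "BB#N" basic-block comments in the FileCheck lines (here "## BB#0:") become "%bb.N" ("## %bb.0:"). Below is a minimal Python sketch of that rewrite; it assumes a plain regex over the ';'-prefixed CHECK lines is sufficient, and it is illustrative only, not the script actually used to produce this commit.

import re
import sys

# Old-style machine-basic-block reference, e.g. "BB#0" or "BB#2".
BB_REF = re.compile(r'BB#(\d+)')

def unify_mbb_refs(text):
    """Rewrite 'BB#N' block references to '%bb.N' in FileCheck lines."""
    out = []
    for line in text.splitlines(keepends=True):
        # Only the ';'-prefixed FileCheck comment lines carry the block
        # labels in these tests; leave the IR bodies untouched.
        if line.lstrip().startswith(';'):
            line = BB_REF.sub(r'%bb.\1', line)
        out.append(line)
    return ''.join(out)

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as f:
            updated = unify_mbb_refs(f.read())
        with open(path, 'w') as f:
            f.write(updated)

Invoked as, say, "python unify_bb.py llvm/test/CodeGen/X86/sse2-intrinsics-x86.ll" (the script name is hypothetical), this reproduces the "- ; SKX: ## BB#0:" / "+ ; SKX: ## %bb.0:" pattern seen in each hunk.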
