[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits <llvm-commits at lists.llvm.org>
Mon Dec 4 09:18:56 PST 2017
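
For reference, the change itself is purely one of printing format: machine basic blocks that used to appear as "BB#5" in debug and assembly-comment output are now printed as "%bb.5", matching the syntax MIR already uses. The test churn below is therefore mechanical: every autogenerated CHECK line matching "## BB#0:" now expects "## %bb.0:". As a minimal sketch of the corresponding CHECK-line rewrite (hypothetical; this is not necessarily the script used for this commit, and autogenerated tests like these are normally refreshed with utils/update_llc_test_checks.py):

    # Hypothetical one-off helper: rewrite old-style "BB#<n>" block
    # comments to the new "%bb.<n>" form in a single test file.
    import re
    import sys

    path = sys.argv[1]  # e.g. test/CodeGen/X86/avx512-insert-extract.ll
    with open(path) as f:
        text = f.read()
    # In these autogenerated CHECK lines, every "BB#<n>" occurrence is a
    # basic-block reference, so a plain regex substitution is safe here.
    with open(path, "w") as f:
        f.write(re.sub(r"BB#(\d+)", r"%bb.\1", text))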


Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm2 = xmm0[0],mem[0],xmm0[2,3]
 ; CHECK-NEXT:    vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
 ; CHECK-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
@@ -20,7 +20,7 @@ define <16 x float> @test1(<16 x float>
 
 define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovhpd {{.*#+}} xmm2 = xmm0[0],mem[0]
 ; CHECK-NEXT:    vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
 ; CHECK-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
@@ -35,7 +35,7 @@ define <8 x double> @test2(<8 x double>
 
 define <16 x float> @test3(<16 x float> %x) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[2,3]
 ; CHECK-NEXT:    vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
@@ -47,7 +47,7 @@ define <16 x float> @test3(<16 x float>
 
 define <8 x i64> @test4(<8 x i64> %x) nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
 ; CHECK-NEXT:    vmovq %xmm1, %rax
 ; CHECK-NEXT:    vpinsrq $1, %rax, %xmm0, %xmm1
@@ -60,7 +60,7 @@ define <8 x i64> @test4(<8 x i64> %x) no
 
 define i32 @test5(<4 x float> %x) nounwind {
 ; CHECK-LABEL: test5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractps $3, %xmm0, %eax
 ; CHECK-NEXT:    retq
   %ef = extractelement <4 x float> %x, i32 3
@@ -70,7 +70,7 @@ define i32 @test5(<4 x float> %x) nounwi
 
 define void @test6(<4 x float> %x, float* %out) nounwind {
 ; CHECK-LABEL: test6:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractps $3, %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
   %ef = extractelement <4 x float> %x, i32 3
@@ -80,7 +80,7 @@ define void @test6(<4 x float> %x, float
 
 define float @test7(<16 x float> %x, i32 %ind) nounwind {
 ; CHECK-LABEL: test7:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    movq %rsp, %rbp
 ; CHECK-NEXT:    andq $-64, %rsp
@@ -99,7 +99,7 @@ define float @test7(<16 x float> %x, i32
 
 define double @test8(<8 x double> %x, i32 %ind) nounwind {
 ; CHECK-LABEL: test8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    movq %rsp, %rbp
 ; CHECK-NEXT:    andq $-64, %rsp
@@ -118,7 +118,7 @@ define double @test8(<8 x double> %x, i3
 
 define float @test9(<8 x float> %x, i32 %ind) nounwind {
 ; CHECK-LABEL: test9:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    movq %rsp, %rbp
 ; CHECK-NEXT:    andq $-32, %rsp
@@ -137,7 +137,7 @@ define float @test9(<8 x float> %x, i32
 
 define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
 ; CHECK-LABEL: test10:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    movq %rsp, %rbp
 ; CHECK-NEXT:    andq $-64, %rsp
@@ -156,14 +156,14 @@ define i32 @test10(<16 x i32> %x, i32 %i
 
 define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
 ; KNL-LABEL: test11:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kshiftlw $11, %k0, %k0
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    testb $1, %al
 ; KNL-NEXT:    je LBB10_2
-; KNL-NEXT:  ## BB#1: ## %A
+; KNL-NEXT:  ## %bb.1: ## %A
 ; KNL-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; KNL-NEXT:    retq
 ; KNL-NEXT:  LBB10_2: ## %B
@@ -171,14 +171,14 @@ define <16 x i32> @test11(<16 x i32>%a,
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test11:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpltud %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kshiftlw $11, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    je LBB10_2
-; SKX-NEXT:  ## BB#1: ## %A
+; SKX-NEXT:  ## %bb.1: ## %A
 ; SKX-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ; SKX-NEXT:  LBB10_2: ## %B
@@ -196,7 +196,7 @@ define <16 x i32> @test11(<16 x i32>%a,
 
 define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
 ; KNL-LABEL: test12:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpgtq %zmm0, %zmm2, %k0
 ; KNL-NEXT:    kshiftlw $15, %k0, %k0
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
@@ -208,7 +208,7 @@ define i64 @test12(<16 x i64>%a, <16 x i
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test12:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpgtq %zmm0, %zmm2, %k0
 ; SKX-NEXT:    kshiftlb $7, %k0, %k0
 ; SKX-NEXT:    kshiftrb $7, %k0, %k0
@@ -226,7 +226,7 @@ define i64 @test12(<16 x i64>%a, <16 x i
 
 define i16 @test13(i32 %a, i32 %b) {
 ; KNL-LABEL: test13:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    setb %al
 ; KNL-NEXT:    movw $-4, %cx
@@ -241,7 +241,7 @@ define i16 @test13(i32 %a, i32 %b) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test13:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    setb %al
 ; SKX-NEXT:    movw $-4, %cx
@@ -262,7 +262,7 @@ define i16 @test13(i32 %a, i32 %b) {
 
 define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
 ; KNL-LABEL: test14:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpgtq %zmm0, %zmm1, %k0
 ; KNL-NEXT:    kshiftlw $11, %k0, %k0
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
@@ -274,7 +274,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test14:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpgtq %zmm0, %zmm1, %k0
 ; SKX-NEXT:    kshiftlb $3, %k0, %k0
 ; SKX-NEXT:    kshiftrb $7, %k0, %k0
@@ -292,7 +292,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64
 
 define i16 @test15(i1 *%addr) {
 ; CHECK-LABEL: test15:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movb (%rdi), %al
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    testb %al, %al
@@ -307,7 +307,7 @@ define i16 @test15(i1 *%addr) {
 
 define i16 @test16(i1 *%addr, i16 %a) {
 ; KNL-LABEL: test16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movb (%rdi), %al
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kmovw %eax, %k2
@@ -323,7 +323,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    vpmovm2d %k0, %zmm0
@@ -344,7 +344,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
 
 define i8 @test17(i1 *%addr, i8 %a) {
 ; KNL-LABEL: test17:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movb (%rdi), %al
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kmovw %eax, %k2
@@ -360,7 +360,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test17:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    vpmovm2q %k0, %zmm0
@@ -381,7 +381,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
 
 define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
 ; CHECK-LABEL: extract_v8i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrq $1, %xmm0, %rax
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpextrq $1, %xmm0, (%rdi)
@@ -395,7 +395,7 @@ define i64 @extract_v8i64(<8 x i64> %x,
 
 define i64 @extract_v4i64(<4 x i64> %x, i64* %dst) {
 ; CHECK-LABEL: extract_v4i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrq $1, %xmm0, %rax
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpextrq $1, %xmm0, (%rdi)
@@ -409,7 +409,7 @@ define i64 @extract_v4i64(<4 x i64> %x,
 
 define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
 ; CHECK-LABEL: extract_v2i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %xmm0, %rax
 ; CHECK-NEXT:    vpextrq $1, %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
@@ -421,7 +421,7 @@ define i64 @extract_v2i64(<2 x i64> %x,
 
 define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) {
 ; CHECK-LABEL: extract_v16i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractps $1, %xmm0, %eax
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vextractps $1, %xmm0, (%rdi)
@@ -435,7 +435,7 @@ define i32 @extract_v16i32(<16 x i32> %x
 
 define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) {
 ; CHECK-LABEL: extract_v8i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractps $1, %xmm0, %eax
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vextractps $1, %xmm0, (%rdi)
@@ -449,7 +449,7 @@ define i32 @extract_v8i32(<8 x i32> %x,
 
 define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
 ; CHECK-LABEL: extract_v4i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractps $1, %xmm0, %eax
 ; CHECK-NEXT:    vextractps $3, %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
@@ -461,7 +461,7 @@ define i32 @extract_v4i32(<4 x i32> %x,
 
 define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
 ; CHECK-LABEL: extract_v32i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrw $1, %xmm0, %eax
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpextrw $1, %xmm0, (%rdi)
@@ -476,7 +476,7 @@ define i16 @extract_v32i16(<32 x i16> %x
 
 define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
 ; CHECK-LABEL: extract_v16i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrw $1, %xmm0, %eax
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpextrw $1, %xmm0, (%rdi)
@@ -491,7 +491,7 @@ define i16 @extract_v16i16(<16 x i16> %x
 
 define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
 ; CHECK-LABEL: extract_v8i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrw $1, %xmm0, %eax
 ; CHECK-NEXT:    vpextrw $3, %xmm0, (%rdi)
 ; CHECK-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -504,7 +504,7 @@ define i16 @extract_v8i16(<8 x i16> %x,
 
 define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
 ; CHECK-LABEL: extract_v64i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrb $1, %xmm0, %eax
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpextrb $1, %xmm0, (%rdi)
@@ -519,7 +519,7 @@ define i8 @extract_v64i8(<64 x i8> %x, i
 
 define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
 ; CHECK-LABEL: extract_v32i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrb $1, %xmm0, %eax
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpextrb $1, %xmm0, (%rdi)
@@ -534,7 +534,7 @@ define i8 @extract_v32i8(<32 x i8> %x, i
 
 define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
 ; CHECK-LABEL: extract_v16i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpextrb $1, %xmm0, %eax
 ; CHECK-NEXT:    vpextrb $3, %xmm0, (%rdi)
 ; CHECK-NEXT:    ## kill: %al<def> %al<kill> %eax<kill>
@@ -547,7 +547,7 @@ define i8 @extract_v16i8(<16 x i8> %x, i
 
 define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y , i64* %ptr) {
 ; CHECK-LABEL: insert_v8i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrq $1, (%rsi), %xmm0, %xmm1
 ; CHECK-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm1
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -562,7 +562,7 @@ define <8 x i64> @insert_v8i64(<8 x i64>
 
 define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) {
 ; CHECK-LABEL: insert_v4i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrq $1, (%rsi), %xmm0, %xmm1
 ; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -577,7 +577,7 @@ define <4 x i64> @insert_v4i64(<4 x i64>
 
 define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) {
 ; CHECK-LABEL: insert_v2i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    vpinsrq $1, (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -589,7 +589,7 @@ define <2 x i64> @insert_v2i64(<2 x i64>
 
 define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) {
 ; CHECK-LABEL: insert_v16i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm1
 ; CHECK-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm1
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -604,7 +604,7 @@ define <16 x i32> @insert_v16i32(<16 x i
 
 define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) {
 ; CHECK-LABEL: insert_v8i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm1
 ; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -619,7 +619,7 @@ define <8 x i32> @insert_v8i32(<8 x i32>
 
 define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
 ; CHECK-LABEL: insert_v4i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -631,7 +631,7 @@ define <4 x i32> @insert_v4i32(<4 x i32>
 
 define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
 ; KNL-LABEL: insert_v32i16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm2
 ; KNL-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
 ; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -640,7 +640,7 @@ define <32 x i16> @insert_v32i16(<32 x i
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: insert_v32i16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
 ; SKX-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm1
 ; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -655,7 +655,7 @@ define <32 x i16> @insert_v32i16(<32 x i
 
 define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) {
 ; CHECK-LABEL: insert_v16i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
 ; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -670,7 +670,7 @@ define <16 x i16> @insert_v16i16(<16 x i
 
 define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
 ; CHECK-LABEL: insert_v8i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -682,7 +682,7 @@ define <8 x i16> @insert_v8i16(<8 x i16>
 
 define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
 ; KNL-LABEL: insert_v64i8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpinsrb $1, (%rsi), %xmm0, %xmm2
 ; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
 ; KNL-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -691,7 +691,7 @@ define <64 x i8> @insert_v64i8(<64 x i8>
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: insert_v64i8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpinsrb $1, (%rsi), %xmm0, %xmm1
 ; SKX-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm1
 ; SKX-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
@@ -706,7 +706,7 @@ define <64 x i8> @insert_v64i8(<64 x i8>
 
 define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) {
 ; CHECK-LABEL: insert_v32i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrb $1, (%rsi), %xmm0, %xmm1
 ; CHECK-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -721,7 +721,7 @@ define <32 x i8> @insert_v32i8(<32 x i8>
 
 define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
 ; CHECK-LABEL: insert_v16i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrb $3, (%rsi), %xmm0, %xmm0
 ; CHECK-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -733,7 +733,7 @@ define <16 x i8> @insert_v16i8(<16 x i8>
 
 define <8 x i64> @test_insert_128_v8i64(<8 x i64> %x, i64 %y) {
 ; CHECK-LABEL: test_insert_128_v8i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm1
 ; CHECK-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -743,7 +743,7 @@ define <8 x i64> @test_insert_128_v8i64(
 
 define <16 x i32> @test_insert_128_v16i32(<16 x i32> %x, i32 %y) {
 ; CHECK-LABEL: test_insert_128_v16i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm1
 ; CHECK-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -753,7 +753,7 @@ define <16 x i32> @test_insert_128_v16i3
 
 define <8 x double> @test_insert_128_v8f64(<8 x double> %x, double %y) {
 ; CHECK-LABEL: test_insert_128_v8f64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -763,7 +763,7 @@ define <8 x double> @test_insert_128_v8f
 
 define <16 x float> @test_insert_128_v16f32(<16 x float> %x, float %y) {
 ; CHECK-LABEL: test_insert_128_v16f32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[2,3]
 ; CHECK-NEXT:    vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -773,7 +773,7 @@ define <16 x float> @test_insert_128_v16
 
 define <16 x i16> @test_insert_128_v16i16(<16 x i16> %x, i16 %y) {
 ; CHECK-LABEL: test_insert_128_v16i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpinsrw $2, %edi, %xmm1, %xmm1
 ; CHECK-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -784,7 +784,7 @@ define <16 x i16> @test_insert_128_v16i1
 
 define <32 x i8> @test_insert_128_v32i8(<32 x i8> %x, i8 %y) {
 ; CHECK-LABEL: test_insert_128_v32i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpinsrb $4, %edi, %xmm1, %xmm1
 ; CHECK-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -795,7 +795,7 @@ define <32 x i8> @test_insert_128_v32i8(
 
 define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32> %y) {
 ; KNL-LABEL: test_insertelement_v32i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -956,7 +956,7 @@ define i32 @test_insertelement_v32i1(i32
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_insertelement_v32i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    setb %al
 ; SKX-NEXT:    vpcmpltud %zmm2, %zmm0, %k0
@@ -980,7 +980,7 @@ define i32 @test_insertelement_v32i1(i32
 
 define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
 ; KNL-LABEL: test_iinsertelement_v4i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    setb %al
 ; KNL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
@@ -1018,7 +1018,7 @@ define i8 @test_iinsertelement_v4i1(i32
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_iinsertelement_v4i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    setb %al
 ; SKX-NEXT:    vpcmpltud %xmm1, %xmm0, %k0
@@ -1041,7 +1041,7 @@ define i8 @test_iinsertelement_v4i1(i32
 
 define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
 ; KNL-LABEL: test_iinsertelement_v2i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    setb %al
 ; KNL-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
@@ -1063,7 +1063,7 @@ define i8 @test_iinsertelement_v2i1(i32
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_iinsertelement_v2i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    setb %al
 ; SKX-NEXT:    vpcmpltuq %xmm1, %xmm0, %k0
@@ -1085,7 +1085,7 @@ define i8 @test_iinsertelement_v2i1(i32
 
 define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
 ; KNL-LABEL: test_extractelement_v2i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; KNL-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; KNL-NEXT:    vpxor %xmm2, %xmm0, %xmm0
@@ -1098,7 +1098,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_v2i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $15, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
@@ -1116,7 +1116,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
 ; KNL-LABEL: extractelement_v2i1_alt:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; KNL-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; KNL-NEXT:    vpxor %xmm2, %xmm0, %xmm0
@@ -1129,7 +1129,7 @@ define zeroext i8 @extractelement_v2i1_a
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: extractelement_v2i1_alt:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $15, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
@@ -1148,7 +1148,7 @@ define zeroext i8 @extractelement_v2i1_a
 
 define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; KNL-LABEL: test_extractelement_v4i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; KNL-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; KNL-NEXT:    vpxor %xmm2, %xmm0, %xmm0
@@ -1158,7 +1158,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_v4i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleud %xmm1, %xmm0, %k0
 ; SKX-NEXT:    kshiftlw $12, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
@@ -1173,7 +1173,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @test_extractelement_v32i1(<32 x i8> %a, <32 x i8> %b) {
 ; KNL-LABEL: test_extractelement_v32i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; KNL-NEXT:    vpxor %ymm2, %ymm1, %ymm1
 ; KNL-NEXT:    vpxor %ymm2, %ymm0, %ymm0
@@ -1184,7 +1184,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_v32i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleub %ymm1, %ymm0, %k0
 ; SKX-NEXT:    kshiftld $29, %k0, %k0
 ; SKX-NEXT:    kshiftrd $31, %k0, %k0
@@ -1200,7 +1200,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: test_extractelement_v64i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; KNL-NEXT:    vpxor %ymm0, %ymm3, %ymm2
 ; KNL-NEXT:    vpxor %ymm0, %ymm1, %ymm0
@@ -1215,7 +1215,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_v64i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleub %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kshiftrq $63, %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
@@ -1233,7 +1233,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: extractelement_v64i1_alt:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; KNL-NEXT:    vpxor %ymm0, %ymm3, %ymm2
 ; KNL-NEXT:    vpxor %ymm0, %ymm1, %ymm0
@@ -1248,7 +1248,7 @@ define zeroext i8 @extractelement_v64i1_
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: extractelement_v64i1_alt:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleub %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kshiftrq $63, %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
@@ -1267,7 +1267,7 @@ define zeroext i8 @extractelement_v64i1_
 
 define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v2i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    andl $1, %edi
@@ -1279,7 +1279,7 @@ define i64 @test_extractelement_variable
 
 define i64 @test_extractelement_variable_v4i64(<4 x i64> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v4i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1301,7 +1301,7 @@ define i64 @test_extractelement_variable
 
 define i64 @test_extractelement_variable_v8i64(<8 x i64> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v8i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1323,7 +1323,7 @@ define i64 @test_extractelement_variable
 
 define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v2f64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    andl $1, %edi
@@ -1335,7 +1335,7 @@ define double @test_extractelement_varia
 
 define double @test_extractelement_variable_v4f64(<4 x double> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v4f64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1357,7 +1357,7 @@ define double @test_extractelement_varia
 
 define double @test_extractelement_variable_v8f64(<8 x double> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v8f64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1379,7 +1379,7 @@ define double @test_extractelement_varia
 
 define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v4i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    andl $3, %edi
@@ -1391,7 +1391,7 @@ define i32 @test_extractelement_variable
 
 define i32 @test_extractelement_variable_v8i32(<8 x i32> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v8i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1413,7 +1413,7 @@ define i32 @test_extractelement_variable
 
 define i32 @test_extractelement_variable_v16i32(<16 x i32> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v16i32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1435,7 +1435,7 @@ define i32 @test_extractelement_variable
 
 define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v4f32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    andl $3, %edi
@@ -1447,7 +1447,7 @@ define float @test_extractelement_variab
 
 define float @test_extractelement_variable_v8f32(<8 x float> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v8f32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1469,7 +1469,7 @@ define float @test_extractelement_variab
 
 define float @test_extractelement_variable_v16f32(<16 x float> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v16f32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1491,7 +1491,7 @@ define float @test_extractelement_variab
 
 define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v8i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    andl $7, %edi
@@ -1503,7 +1503,7 @@ define i16 @test_extractelement_variable
 
 define i16 @test_extractelement_variable_v16i16(<16 x i16> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v16i16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1525,7 +1525,7 @@ define i16 @test_extractelement_variable
 
 define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
 ; KNL-LABEL: test_extractelement_variable_v32i16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1544,7 +1544,7 @@ define i16 @test_extractelement_variable
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_variable_v32i16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16
@@ -1566,7 +1566,7 @@ define i16 @test_extractelement_variable
 
 define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v16i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    andl $15, %edi
@@ -1579,7 +1579,7 @@ define i8 @test_extractelement_variable_
 
 define i8 @test_extractelement_variable_v32i8(<32 x i8> %t1, i32 %index) {
 ; CHECK-LABEL: test_extractelement_variable_v32i8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbp, -16
@@ -1603,7 +1603,7 @@ define i8 @test_extractelement_variable_
 
 define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
 ; KNL-LABEL: test_extractelement_variable_v64i8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1623,7 +1623,7 @@ define i8 @test_extractelement_variable_
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_variable_v64i8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16
@@ -1647,7 +1647,7 @@ define i8 @test_extractelement_variable_
 
 define i8 @test_extractelement_variable_v64i8_indexi8(<64 x i8> %t1, i8 %index) {
 ; KNL-LABEL: test_extractelement_variable_v64i8_indexi8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1668,7 +1668,7 @@ define i8 @test_extractelement_variable_
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_variable_v64i8_indexi8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16
@@ -1694,7 +1694,7 @@ define i8 @test_extractelement_variable_
 
 define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
 ; KNL-LABEL: test_extractelement_varible_v2i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; KNL-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; KNL-NEXT:    vpxor %xmm2, %xmm1, %xmm1
@@ -1707,7 +1707,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_varible_v2i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; SKX-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k0
 ; SKX-NEXT:    vpmovm2q %k0, %xmm0
@@ -1724,7 +1724,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
 ; KNL-LABEL: test_extractelement_varible_v4i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; KNL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; KNL-NEXT:    vpxor %xmm2, %xmm1, %xmm1
@@ -1737,7 +1737,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_varible_v4i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    ## kill: %edi<def> %edi<kill> %rdi<def>
 ; SKX-NEXT:    vpcmpnleud %xmm1, %xmm0, %k0
 ; SKX-NEXT:    vpmovm2d %k0, %xmm0
@@ -1754,7 +1754,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b, i32 %index) {
 ; KNL-LABEL: test_extractelement_varible_v8i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1777,7 +1777,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_varible_v8i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16
@@ -1804,7 +1804,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %b, i32 %index) {
 ; KNL-LABEL: test_extractelement_varible_v16i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1825,7 +1825,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_varible_v16i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16
@@ -1852,7 +1852,7 @@ define zeroext i8 @test_extractelement_v
 
 define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b, i32 %index) {
 ; KNL-LABEL: test_extractelement_varible_v32i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1876,7 +1876,7 @@ define zeroext i8 @test_extractelement_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_extractelement_varible_v32i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16
@@ -1903,7 +1903,7 @@ define zeroext i8 @test_extractelement_v
 
 define <8 x i64> @insert_double_zero(<2 x i64> %a) nounwind {
 ; CHECK-LABEL: insert_double_zero:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vinsertf32x4 $2, %xmm0, %zmm1, %zmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define zeroext i8 @test_extractelement_varible_v64i1(<64 x i8> %a, <64 x i8> %b, i32 %index) {
 ; SKX-LABEL: test_extractelement_varible_v64i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    pushq %rbp
 ; SKX-NEXT:    .cfi_def_cfa_offset 16
 ; SKX-NEXT:    .cfi_offset %rbp, -16

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Mon Dec  4 09:18:51 2017
@@ -7,12 +7,12 @@
 
 define <16 x float> @test_mm512_shuffle_f32x4(<16 x float> %__A, <16 x float> %__B) {
 ; X32-LABEL: test_mm512_shuffle_f32x4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_shuffle_f32x4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X64-NEXT:    retq
 entry:
@@ -23,13 +23,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_shuffle_f32x4(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X32-LABEL: test_mm512_mask_shuffle_f32x4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shuffle_f32x4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
 ; X64-NEXT:    retq
@@ -42,13 +42,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_shuffle_f32x4(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; X32-LABEL: test_mm512_maskz_shuffle_f32x4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shuffle_f32x4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
 ; X64-NEXT:    retq
@@ -61,12 +61,12 @@ entry:
 
 define <8 x double> @test_mm512_shuffle_f64x2(<8 x double> %__A, <8 x double> %__B) {
 ; X32-LABEL: test_mm512_shuffle_f64x2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_shuffle_f64x2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X64-NEXT:    retq
 entry:
@@ -76,14 +76,14 @@ entry:
 
 define <8 x double> @test_mm512_mask_shuffle_f64x2(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; X32-LABEL: test_mm512_mask_shuffle_f64x2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vshuff64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shuffle_f64x2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshuff64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
 ; X64-NEXT:    retq
@@ -96,14 +96,14 @@ entry:
 
 define <8 x double> @test_mm512_maskz_shuffle_f64x2(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; X32-LABEL: test_mm512_maskz_shuffle_f64x2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shuffle_f64x2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X64-NEXT:    retq
@@ -116,12 +116,12 @@ entry:
 
 define <8 x i64> @test_mm512_shuffle_i32x4(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X32-LABEL: test_mm512_shuffle_i32x4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_shuffle_i32x4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X64-NEXT:    retq
 entry:
@@ -131,13 +131,13 @@ entry:
 
 define <8 x i64> @test_mm512_mask_shuffle_i32x4(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X32-LABEL: test_mm512_mask_shuffle_i32x4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shuffle_i32x4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7],zmm2[0,1,2,3,0,1,2,3]
 ; X64-NEXT:    retq
@@ -153,13 +153,13 @@ entry:
 
 define <8 x i64> @test_mm512_maskz_shuffle_i32x4(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X32-LABEL: test_mm512_maskz_shuffle_i32x4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shuffle_i32x4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],zmm1[0,1,2,3,0,1,2,3]
 ; X64-NEXT:    retq
@@ -174,12 +174,12 @@ entry:
 
 define <8 x i64> @test_mm512_shuffle_i64x2(<8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X32-LABEL: test_mm512_shuffle_i64x2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_shuffle_i64x2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X64-NEXT:    retq
 entry:
@@ -189,14 +189,14 @@ entry:
 
 define <8 x i64> @test_mm512_mask_shuffle_i64x2(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X32-LABEL: test_mm512_mask_shuffle_i64x2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shuffle_i64x2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3],zmm2[0,1,0,1]
 ; X64-NEXT:    retq
@@ -209,14 +209,14 @@ entry:
 
 define <8 x i64> @test_mm512_maskz_shuffle_i64x2(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) local_unnamed_addr #0 {
 ; X32-LABEL: test_mm512_maskz_shuffle_i64x2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shuffle_i64x2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; X64-NEXT:    retq
@@ -230,7 +230,7 @@ entry:
 
 define zeroext i16 @test_mm512_testn_epi32_mask(<8 x i64> %__A, <8 x i64> %__B) {
 ; X32-LABEL: test_mm512_testn_epi32_mask:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    vptestnmd %zmm0, %zmm1, %k0
 ; X32-NEXT:    kmovw %k0, %eax
 ; X32-NEXT:    movzwl %ax, %eax
@@ -238,7 +238,7 @@ define zeroext i16 @test_mm512_testn_epi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_testn_epi32_mask:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vptestnmd %zmm0, %zmm1, %k0
 ; X64-NEXT:    kmovw %k0, %eax
 ; X64-NEXT:    movzwl %ax, %eax
@@ -254,7 +254,7 @@ entry:
 
 define zeroext i16 @test_mm512_mask_testn_epi32_mask(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X32-LABEL: test_mm512_mask_testn_epi32_mask:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vptestnmd %zmm0, %zmm1, %k0 {%k1}
 ; X32-NEXT:    kmovw %k0, %eax
@@ -263,7 +263,7 @@ define zeroext i16 @test_mm512_mask_test
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_testn_epi32_mask:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmd %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
@@ -282,7 +282,7 @@ entry:
 
 define zeroext i8 @test_mm512_testn_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) {
 ; X32-LABEL: test_mm512_testn_epi64_mask:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    vptestnmq %zmm0, %zmm1, %k0
 ; X32-NEXT:    kmovw %k0, %eax
 ; X32-NEXT:    movzbl %al, %eax
@@ -290,7 +290,7 @@ define zeroext i8 @test_mm512_testn_epi6
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_testn_epi64_mask:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    vptestnmq %zmm0, %zmm1, %k0
 ; X64-NEXT:    kmovw %k0, %eax
 ; X64-NEXT:    movzbl %al, %eax
@@ -305,7 +305,7 @@ entry:
 
 define zeroext i8 @test_mm512_mask_testn_epi64_mask(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X32-LABEL: test_mm512_mask_testn_epi64_mask:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vptestnmq %zmm0, %zmm1, %k0 {%k1}
@@ -315,7 +315,7 @@ define zeroext i8 @test_mm512_mask_testn
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_testn_epi64_mask:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmq %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
@@ -333,7 +333,7 @@ entry:
 
 define zeroext i16 @test_mm512_mask_test_epi32_mask(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X32-LABEL: test_mm512_mask_test_epi32_mask:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vptestmd %zmm0, %zmm1, %k0 {%k1}
 ; X32-NEXT:    kmovw %k0, %eax
@@ -342,7 +342,7 @@ define zeroext i16 @test_mm512_mask_test
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_test_epi32_mask:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmd %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
@@ -361,7 +361,7 @@ entry:
 
 define zeroext i8 @test_mm512_mask_test_epi64_mask(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
 ; X32-LABEL: test_mm512_mask_test_epi64_mask:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vptestmq %zmm0, %zmm1, %k0 {%k1}
@@ -371,7 +371,7 @@ define zeroext i8 @test_mm512_mask_test_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_test_epi64_mask:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmq %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
@@ -389,14 +389,14 @@ entry:
 
 define <8 x i64> @test_mm512_mask_set1_epi32(<8 x i64> %__O, i16 zeroext %__M, i32 %__A) {
 ; X32-LABEL: test_mm512_mask_set1_epi32:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpbroadcastd %eax, %zmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_set1_epi32:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastd %esi, %zmm0 {%k1}
 ; X64-NEXT:    retq
@@ -412,14 +412,14 @@ entry:
 
 define <8 x i64> @test_mm512_maskz_set1_epi32(i16 zeroext %__M, i32 %__A)  {
 ; X32-LABEL: test_mm512_maskz_set1_epi32:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpbroadcastd %eax, %zmm0 {%k1} {z}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_set1_epi32:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastd %esi, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
@@ -434,7 +434,7 @@ entry:
 
 define <8 x i64> @test_mm512_mask_set1_epi64(<8 x i64> %__O, i8 zeroext %__M, i64 %__A) {
 ; X32-LABEL: test_mm512_mask_set1_epi64:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
@@ -448,7 +448,7 @@ define <8 x i64> @test_mm512_mask_set1_e
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_set1_epi64:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastq %rsi, %zmm0 {%k1}
 ; X64-NEXT:    retq
@@ -462,7 +462,7 @@ entry:
 
 define <8 x i64> @test_mm512_maskz_set1_epi64(i8 zeroext %__M, i64 %__A)  {
 ; X32-LABEL: test_mm512_maskz_set1_epi64:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
@@ -476,7 +476,7 @@ define <8 x i64> @test_mm512_maskz_set1_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_set1_epi64:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastq %rsi, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
@@ -491,12 +491,12 @@ entry:
 
 define <8 x i64> @test_mm512_broadcastd_epi32(<2 x i64> %a0) {
 ; X32-LABEL: test_mm512_broadcastd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vbroadcastss %xmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_broadcastd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastss %xmm0, %zmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -507,13 +507,13 @@ define <8 x i64> @test_mm512_broadcastd_
 
 define <8 x i64> @test_mm512_mask_broadcastd_epi32(<8 x i64> %a0, i16 %a1, <2 x i64> %a2) {
 ; X32-LABEL: test_mm512_mask_broadcastd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpbroadcastd %xmm1, %zmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_broadcastd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastd %xmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
@@ -528,13 +528,13 @@ define <8 x i64> @test_mm512_mask_broadc
 
 define <8 x i64> @test_mm512_maskz_broadcastd_epi32(i16 %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm512_maskz_broadcastd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpbroadcastd %xmm0, %zmm0 {%k1} {z}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_broadcastd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastd %xmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
@@ -548,12 +548,12 @@ define <8 x i64> @test_mm512_maskz_broad
 
 define <8 x i64> @test_mm512_broadcastq_epi64(<2 x i64> %a0) {
 ; X32-LABEL: test_mm512_broadcastq_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_broadcastq_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <8 x i32> zeroinitializer
@@ -562,14 +562,14 @@ define <8 x i64> @test_mm512_broadcastq_
 
 define <8 x i64> @test_mm512_mask_broadcastq_epi64(<8 x i64> %a0, i8 %a1, <2 x i64> %a2) {
 ; X32-LABEL: test_mm512_mask_broadcastq_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpbroadcastq %xmm1, %zmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_broadcastq_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastq %xmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
@@ -581,14 +581,14 @@ define <8 x i64> @test_mm512_mask_broadc
 
 define <8 x i64> @test_mm512_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
 ; X32-LABEL: test_mm512_maskz_broadcastq_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpbroadcastq %xmm0, %zmm0 {%k1} {z}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_broadcastq_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpbroadcastq %xmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
@@ -600,12 +600,12 @@ define <8 x i64> @test_mm512_maskz_broad
 
 define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a0) {
 ; X32-LABEL: test_mm512_broadcastsd_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_broadcastsd_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> undef, <8 x i32> zeroinitializer
@@ -614,14 +614,14 @@ define <8 x double> @test_mm512_broadcas
 
 define <8 x double> @test_mm512_mask_broadcastsd_pd(<8 x double> %a0, i8 %a1, <2 x double> %a2) {
 ; X32-LABEL: test_mm512_mask_broadcastsd_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vbroadcastsd %xmm1, %zmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_broadcastsd_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vbroadcastsd %xmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
@@ -633,14 +633,14 @@ define <8 x double> @test_mm512_mask_bro
 
 define <8 x double> @test_mm512_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
 ; X32-LABEL: test_mm512_maskz_broadcastsd_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_broadcastsd_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
@@ -652,12 +652,12 @@ define <8 x double> @test_mm512_maskz_br
 
 define <16 x float> @test_mm512_broadcastss_ps(<4 x float> %a0) {
 ; X32-LABEL: test_mm512_broadcastss_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vbroadcastss %xmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_broadcastss_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastss %xmm0, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> undef, <16 x i32> zeroinitializer
@@ -666,13 +666,13 @@ define <16 x float> @test_mm512_broadcas
 
 define <16 x float> @test_mm512_mask_broadcastss_ps(<16 x float> %a0, i16 %a1, <4 x float> %a2) {
 ; X32-LABEL: test_mm512_mask_broadcastss_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vbroadcastss %xmm1, %zmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_broadcastss_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vbroadcastss %xmm1, %zmm0 {%k1}
 ; X64-NEXT:    retq
@@ -684,13 +684,13 @@ define <16 x float> @test_mm512_mask_bro
 
 define <16 x float> @test_mm512_maskz_broadcastss_ps(i16 %a0, <4 x float> %a1) {
 ; X32-LABEL: test_mm512_maskz_broadcastss_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_broadcastss_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
 ; X64-NEXT:    retq
@@ -702,12 +702,12 @@ define <16 x float> @test_mm512_maskz_br
 
 define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
 ; X32-LABEL: test_mm512_movddup_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_movddup_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -716,14 +716,14 @@ define <8 x double> @test_mm512_movddup_
 
 define <8 x double> @test_mm512_mask_movddup_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
 ; X32-LABEL: test_mm512_mask_movddup_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_movddup_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
 ; X64-NEXT:    retq
@@ -735,14 +735,14 @@ define <8 x double> @test_mm512_mask_mov
 
 define <8 x double> @test_mm512_maskz_movddup_pd(i8 %a0, <8 x double> %a1) {
 ; X32-LABEL: test_mm512_maskz_movddup_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_movddup_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
 ; X64-NEXT:    retq
@@ -754,12 +754,12 @@ define <8 x double> @test_mm512_maskz_mo
 
 define <16 x float> @test_mm512_movehdup_ps(<16 x float> %a0) {
 ; X32-LABEL: test_mm512_movehdup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_movehdup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X64-NEXT:    retq
   %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -768,13 +768,13 @@ define <16 x float> @test_mm512_movehdup
 
 define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
 ; X32-LABEL: test_mm512_mask_movehdup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_movehdup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X64-NEXT:    retq
@@ -786,13 +786,13 @@ define <16 x float> @test_mm512_mask_mov
 
 define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
 ; X32-LABEL: test_mm512_maskz_movehdup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_movehdup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; X64-NEXT:    retq
@@ -804,12 +804,12 @@ define <16 x float> @test_mm512_maskz_mo
 
 define <16 x float> @test_mm512_moveldup_ps(<16 x float> %a0) {
 ; X32-LABEL: test_mm512_moveldup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_moveldup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X64-NEXT:    retq
   %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
@@ -818,13 +818,13 @@ define <16 x float> @test_mm512_moveldup
 
 define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
 ; X32-LABEL: test_mm512_mask_moveldup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_moveldup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X64-NEXT:    retq
@@ -836,13 +836,13 @@ define <16 x float> @test_mm512_mask_mov
 
 define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
 ; X32-LABEL: test_mm512_maskz_moveldup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_moveldup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; X64-NEXT:    retq
@@ -854,12 +854,12 @@ define <16 x float> @test_mm512_maskz_mo
 
 define <8 x double> @test_mm512_permute_pd(<8 x double> %a0) {
 ; X32-LABEL: test_mm512_permute_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_permute_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,2,4,4,6,6]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -868,14 +868,14 @@ define <8 x double> @test_mm512_permute_
 
 define <8 x double> @test_mm512_mask_permute_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
 ; X32-LABEL: test_mm512_mask_permute_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpermilpd {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_permute_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermilpd {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,2,4,4,6,6]
 ; X64-NEXT:    retq
@@ -887,14 +887,14 @@ define <8 x double> @test_mm512_mask_per
 
 define <8 x double> @test_mm512_maskz_permute_pd(i8 %a0, <8 x double> %a1) {
 ; X32-LABEL: test_mm512_maskz_permute_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_permute_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,2,4,4,6,6]
 ; X64-NEXT:    retq
@@ -906,12 +906,12 @@ define <8 x double> @test_mm512_maskz_pe
 
 define <16 x float> @test_mm512_permute_ps(<16 x float> %a0) {
 ; X32-LABEL: test_mm512_permute_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_permute_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X64-NEXT:    retq
   %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
@@ -920,13 +920,13 @@ define <16 x float> @test_mm512_permute_
 
 define <16 x float> @test_mm512_mask_permute_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
 ; X32-LABEL: test_mm512_mask_permute_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} = zmm1[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_permute_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} = zmm1[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X64-NEXT:    retq
@@ -938,13 +938,13 @@ define <16 x float> @test_mm512_mask_per
 
 define <16 x float> @test_mm512_maskz_permute_ps(i16 %a0, <16 x float> %a1) {
 ; X32-LABEL: test_mm512_maskz_permute_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_permute_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,0,0,6,4,4,4,10,8,8,8,14,12,12,12]
 ; X64-NEXT:    retq
@@ -956,12 +956,12 @@ define <16 x float> @test_mm512_maskz_pe
 
 define <8 x i64> @test_mm512_permutex_epi64(<8 x i64> %a0) {
 ; X32-LABEL: test_mm512_permutex_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_permutex_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -970,14 +970,14 @@ define <8 x i64> @test_mm512_permutex_ep
 
 define <8 x i64> @test_mm512_mask_permutex_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2) {
 ; X32-LABEL: test_mm512_mask_permutex_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpermq {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_permutex_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermq {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
 ; X64-NEXT:    retq
@@ -989,14 +989,14 @@ define <8 x i64> @test_mm512_mask_permut
 
 define <8 x i64> @test_mm512_maskz_permutex_epi64(i8 %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_maskz_permutex_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_permutex_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
 ; X64-NEXT:    retq
@@ -1008,12 +1008,12 @@ define <8 x i64> @test_mm512_maskz_permu
 
 define <8 x double> @test_mm512_permutex_pd(<8 x double> %a0) {
 ; X32-LABEL: test_mm512_permutex_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_permutex_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[0,0,0,0,4,4,4,4]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
@@ -1022,14 +1022,14 @@ define <8 x double> @test_mm512_permutex
 
 define <8 x double> @test_mm512_mask_permutex_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
 ; X32-LABEL: test_mm512_mask_permutex_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_permutex_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} = zmm1[0,0,0,0,4,4,4,4]
 ; X64-NEXT:    retq
@@ -1041,14 +1041,14 @@ define <8 x double> @test_mm512_mask_per
 
 define <8 x double> @test_mm512_maskz_permutex_pd(i8 %a0, <8 x double> %a1) {
 ; X32-LABEL: test_mm512_maskz_permutex_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_permutex_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4]
 ; X64-NEXT:    retq
@@ -1060,12 +1060,12 @@ define <8 x double> @test_mm512_maskz_pe
 
 define <8 x i64> @test_mm512_shuffle_epi32(<8 x i64> %a0) {
 ; X32-LABEL: test_mm512_shuffle_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_shuffle_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X64-NEXT:    retq
   %arg0 = bitcast <8 x i64> %a0 to <16 x i32>
@@ -1076,13 +1076,13 @@ define <8 x i64> @test_mm512_shuffle_epi
 
 define <8 x i64> @test_mm512_mask_shuffle_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2) {
 ; X32-LABEL: test_mm512_mask_shuffle_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} = zmm1[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shuffle_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} = zmm1[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X64-NEXT:    retq
@@ -1097,13 +1097,13 @@ define <8 x i64> @test_mm512_mask_shuffl
 
 define <8 x i64> @test_mm512_maskz_shuffle_epi32(i16 %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_maskz_shuffle_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shuffle_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,0,0,5,4,4,4,9,8,8,8,13,12,12,12]
 ; X64-NEXT:    retq
@@ -1117,12 +1117,12 @@ define <8 x i64> @test_mm512_maskz_shuff
 
 define <8 x double> @test_mm512_shuffle_pd(<8 x double> %a0, <8 x double> %a1) {
 ; X32-LABEL: test_mm512_shuffle_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_shuffle_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x double> %a0, <8 x double> %a1, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1131,14 +1131,14 @@ define <8 x double> @test_mm512_shuffle_
 
 define <8 x double> @test_mm512_mask_shuffle_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2, <8 x double> %a3) {
 ; X32-LABEL: test_mm512_mask_shuffle_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vshufpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[3],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_shuffle_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshufpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[3],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
 ; X64-NEXT:    retq
@@ -1150,14 +1150,14 @@ define <8 x double> @test_mm512_mask_shu
 
 define <8 x double> @test_mm512_maskz_shuffle_pd(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
 ; X32-LABEL: test_mm512_maskz_shuffle_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_shuffle_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[3],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X64-NEXT:    retq
@@ -1169,12 +1169,12 @@ define <8 x double> @test_mm512_maskz_sh
 
 define <8 x i64> @test_mm512_unpackhi_epi32(<8 x i64> %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_unpackhi_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpackhi_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X64-NEXT:    retq
   %arg0 = bitcast <8 x i64> %a0 to <16 x i32>
@@ -1186,13 +1186,13 @@ define <8 x i64> @test_mm512_unpackhi_ep
 
 define <8 x i64> @test_mm512_mask_unpackhi_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2, <8 x i64> %a3) {
 ; X32-LABEL: test_mm512_mask_unpackhi_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpunpckhdq {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpackhi_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpckhdq {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
 ; X64-NEXT:    retq
@@ -1208,13 +1208,13 @@ define <8 x i64> @test_mm512_mask_unpack
 
 define <8 x i64> @test_mm512_maskz_unpackhi_epi32(i16 %a0, <8 x i64> %a1, <8 x i64> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpackhi_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpunpckhdq {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpackhi_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpckhdq {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X64-NEXT:    retq
@@ -1229,12 +1229,12 @@ define <8 x i64> @test_mm512_maskz_unpac
 
 define <8 x i64> @test_mm512_unpackhi_epi64(<8 x i64> %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_unpackhi_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpackhi_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x i64> %a0, <8 x i64> %a1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -1243,14 +1243,14 @@ define <8 x i64> @test_mm512_unpackhi_ep
 
 define <8 x i64> @test_mm512_mask_unpackhi_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2, <8 x i64> %a3) {
 ; X32-LABEL: test_mm512_mask_unpackhi_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpackhi_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
 ; X64-NEXT:    retq
@@ -1262,14 +1262,14 @@ define <8 x i64> @test_mm512_mask_unpack
 
 define <8 x i64> @test_mm512_maskz_unpackhi_epi64(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpackhi_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpackhi_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X64-NEXT:    retq
@@ -1281,12 +1281,12 @@ define <8 x i64> @test_mm512_maskz_unpac
 
 define <8 x double> @test_mm512_unpackhi_pd(<8 x double> %a0, <8 x double> %a1) {
 ; X32-LABEL: test_mm512_unpackhi_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpackhi_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x double> %a0, <8 x double> %a1, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -1295,14 +1295,14 @@ define <8 x double> @test_mm512_unpackhi
 
 define <8 x double> @test_mm512_mask_unpackhi_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2, <8 x double> %a3) {
 ; X32-LABEL: test_mm512_mask_unpackhi_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpackhi_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} = zmm1[1],zmm2[1],zmm1[3],zmm2[3],zmm1[5],zmm2[5],zmm1[7],zmm2[7]
 ; X64-NEXT:    retq
@@ -1314,14 +1314,14 @@ define <8 x double> @test_mm512_mask_unp
 
 define <8 x double> @test_mm512_maskz_unpackhi_pd(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpackhi_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpackhi_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; X64-NEXT:    retq
@@ -1333,12 +1333,12 @@ define <8 x double> @test_mm512_maskz_un
 
 define <16 x float> @test_mm512_unpackhi_ps(<16 x float> %a0, <16 x float> %a1) {
 ; X32-LABEL: test_mm512_unpackhi_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpackhi_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X64-NEXT:    retq
   %res = shufflevector <16 x float> %a0, <16 x float> %a1, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -1347,13 +1347,13 @@ define <16 x float> @test_mm512_unpackhi
 
 define <16 x float> @test_mm512_mask_unpackhi_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2, <16 x float> %a3) {
 ; X32-LABEL: test_mm512_mask_unpackhi_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpackhi_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15]
 ; X64-NEXT:    retq
@@ -1365,13 +1365,13 @@ define <16 x float> @test_mm512_mask_unp
 
 define <16 x float> @test_mm512_maskz_unpackhi_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpackhi_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpackhi_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; X64-NEXT:    retq
@@ -1383,12 +1383,12 @@ define <16 x float> @test_mm512_maskz_un
 
 define <8 x i64> @test_mm512_unpacklo_epi32(<8 x i64> %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_unpacklo_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpacklo_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X64-NEXT:    retq
   %arg0 = bitcast <8 x i64> %a0 to <16 x i32>
@@ -1400,13 +1400,13 @@ define <8 x i64> @test_mm512_unpacklo_ep
 
 define <8 x i64> @test_mm512_mask_unpacklo_epi32(<8 x i64> %a0, i16 %a1, <8 x i64> %a2, <8 x i64> %a3) {
 ; X32-LABEL: test_mm512_mask_unpacklo_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpunpckldq {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpacklo_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpckldq {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
 ; X64-NEXT:    retq
@@ -1422,13 +1422,13 @@ define <8 x i64> @test_mm512_mask_unpack
 
 define <8 x i64> @test_mm512_maskz_unpacklo_epi32(i16 %a0, <8 x i64> %a1, <8 x i64> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpacklo_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vpunpckldq {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpacklo_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpckldq {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X64-NEXT:    retq
@@ -1443,12 +1443,12 @@ define <8 x i64> @test_mm512_maskz_unpac
 
 define <8 x i64> @test_mm512_unpacklo_epi64(<8 x i64> %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_unpacklo_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpacklo_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x i64> %a0, <8 x i64> %a1, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1457,14 +1457,14 @@ define <8 x i64> @test_mm512_unpacklo_ep
 
 define <8 x i64> @test_mm512_mask_unpacklo_epi64(<8 x i64> %a0, i8 %a1, <8 x i64> %a2, <8 x i64> %a3) {
 ; X32-LABEL: test_mm512_mask_unpacklo_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpacklo_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
 ; X64-NEXT:    retq
@@ -1476,14 +1476,14 @@ define <8 x i64> @test_mm512_mask_unpack
 
 define <8 x i64> @test_mm512_maskz_unpacklo_epi64(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpacklo_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpacklo_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X64-NEXT:    retq
@@ -1495,12 +1495,12 @@ define <8 x i64> @test_mm512_maskz_unpac
 
 define <8 x double> @test_mm512_unpacklo_pd(<8 x double> %a0, <8 x double> %a1) {
 ; X32-LABEL: test_mm512_unpacklo_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpacklo_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x double> %a0, <8 x double> %a1, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -1509,14 +1509,14 @@ define <8 x double> @test_mm512_unpacklo
 
 define <8 x double> @test_mm512_mask_unpacklo_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2, <8 x double> %a3) {
 ; X32-LABEL: test_mm512_mask_unpacklo_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vunpcklpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpacklo_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpcklpd {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[2],zmm2[2],zmm1[4],zmm2[4],zmm1[6],zmm2[6]
 ; X64-NEXT:    retq
@@ -1528,14 +1528,14 @@ define <8 x double> @test_mm512_mask_unp
 
 define <8 x double> @test_mm512_maskz_unpacklo_pd(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpacklo_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    kmovw %eax, %k1
 ; X32-NEXT:    vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpacklo_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; X64-NEXT:    retq
@@ -1547,12 +1547,12 @@ define <8 x double> @test_mm512_maskz_un
 
 define <16 x float> @test_mm512_unpacklo_ps(<16 x float> %a0, <16 x float> %a1) {
 ; X32-LABEL: test_mm512_unpacklo_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_unpacklo_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X64-NEXT:    retq
   %res = shufflevector <16 x float> %a0, <16 x float> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -1561,13 +1561,13 @@ define <16 x float> @test_mm512_unpacklo
 
 define <16 x float> @test_mm512_mask_unpacklo_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2, <16 x float> %a3) {
 ; X32-LABEL: test_mm512_mask_unpacklo_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vunpcklps {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_unpacklo_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpcklps {{.*#+}} zmm0 {%k1} = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13]
 ; X64-NEXT:    retq
@@ -1579,13 +1579,13 @@ define <16 x float> @test_mm512_mask_unp
 
 define <16 x float> @test_mm512_maskz_unpacklo_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
 ; X32-LABEL: test_mm512_maskz_unpacklo_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X32-NEXT:    vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_unpacklo_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; X64-NEXT:    retq
@@ -1597,12 +1597,12 @@ define <16 x float> @test_mm512_maskz_un
 
 define <8 x double> @test_mm512_zextpd128_pd512(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextpd128_pd512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextpd128_pd512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
@@ -1611,12 +1611,12 @@ define <8 x double> @test_mm512_zextpd12
 
 define <8 x double> @test_mm512_zextpd256_pd512(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextpd256_pd512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextpd256_pd512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1625,14 +1625,14 @@ define <8 x double> @test_mm512_zextpd25
 
 define <16 x float> @test_mm512_zextps128_ps512(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextps128_ps512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %xmm0, %xmm0
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextps128_ps512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %xmm0, %xmm0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1643,12 +1643,12 @@ define <16 x float> @test_mm512_zextps12
 
 define <16 x float> @test_mm512_zextps256_ps512(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextps256_ps512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextps256_ps512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1657,12 +1657,12 @@ define <16 x float> @test_mm512_zextps25
 
 define <8 x i64> @test_mm512_zextsi128_si512(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextsi128_si512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextsi128_si512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
@@ -1671,12 +1671,12 @@ define <8 x i64> @test_mm512_zextsi128_s
 
 define <8 x i64> @test_mm512_zextsi256_si512(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextsi256_si512:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextsi256_si512:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x i64> %a0, <4 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
  define <16 x i32>@test_int_x86_avx512_mask_pbroadcastd_gpr_512(i32 %x0, <16 x i32> %x1, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastd_gpr_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpbroadcastd %edi, %zmm1
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpbroadcastd %edi, %zmm0 {%k1}
@@ -23,7 +23,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64>@test_int_x86_avx512_mask_pbroadcastq_gpr_512(i64 %x0, <8 x i64> %x1, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastq_gpr_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpbroadcastq %rdi, %zmm1
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpbroadcastq %rdi, %zmm0 {%k1}
@@ -45,7 +45,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test_x86_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x float> %a1, i16 %mask ) {
 ; CHECK-LABEL: test_x86_vbroadcast_ss_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastss %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
@@ -66,7 +66,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x double> %a1, i8 %mask ) {
 ; CHECK-LABEL: test_x86_vbroadcast_sd_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -87,7 +87,7 @@ declare <16 x i32> @llvm.x86.avx512.pbro
 
 define <16 x i32>@test_int_x86_avx512_pbroadcastd_512(<4 x i32> %x0, <16 x i32> %x1, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_pbroadcastd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm1 {%k1}
@@ -107,7 +107,7 @@ declare <8 x i64> @llvm.x86.avx512.pbroa
 
 define <8 x i64>@test_int_x86_avx512_pbroadcastq_512(<2 x i64> %x0, <8 x i64> %x1, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_pbroadcastq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpbroadcastq %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpbroadcastq %xmm0, %zmm1 {%k1}
@@ -127,7 +127,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16 x float> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_movsldup_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsldup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
@@ -147,7 +147,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16 x float> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_movshdup_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovshdup {{.*#+}} zmm2 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -167,7 +167,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x double> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_movddup_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovddup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
@@ -187,7 +187,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_perm_df_512(<8 x double> %x0, i32 %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_perm_df_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermpd {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4]
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4]
@@ -207,7 +207,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_perm_di_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_perm_di_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermq {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4]
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4]
@@ -225,7 +225,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define void @test_store1(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
 ; CHECK-LABEL: test_store1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovups %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovups %zmm0, (%rsi)
@@ -239,7 +239,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void @test_store2(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
 ; CHECK-LABEL: test_store2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovupd %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovupd %zmm0, (%rsi)
@@ -253,7 +253,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
 ; CHECK-LABEL: test_mask_store_aligned_ps:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovaps %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovaps %zmm0, (%rsi)
@@ -267,7 +267,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
 ; CHECK-LABEL: test_mask_store_aligned_pd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovapd %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovapd %zmm0, (%rsi)
@@ -281,7 +281,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void@test_int_x86_avx512_mask_storeu_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_storeu_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovdqu64 %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm0, (%rsi)
@@ -295,7 +295,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void@test_int_x86_avx512_mask_storeu_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_storeu_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovdqu32 %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovdqu32 %zmm0, (%rsi)
@@ -309,7 +309,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void@test_int_x86_avx512_mask_store_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_store_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm0, (%rsi)
@@ -323,7 +323,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define void@test_int_x86_avx512_mask_store_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_store_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovdqa32 %zmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    vmovdqa32 %zmm0, (%rsi)
@@ -337,7 +337,7 @@ declare void @llvm.x86.avx512.mask.store
 
 define <16 x float> @test_mask_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
 ; CHECK-LABEL: test_mask_load_aligned_ps:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 {%k1}
@@ -355,7 +355,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test_mask_load_unaligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
 ; CHECK-LABEL: test_mask_load_unaligned_ps:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 {%k1}
@@ -373,7 +373,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <8 x double> @test_mask_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
 ; CHECK-LABEL: test_mask_load_aligned_pd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm0 {%k1}
@@ -391,7 +391,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double> @test_mask_load_unaligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
 ; CHECK-LABEL: test_mask_load_unaligned_pd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovupd (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovupd (%rdi), %zmm0 {%k1}
@@ -411,7 +411,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32> @test_mask_load_unaligned_d(i8* %ptr, i8* %ptr2, <16 x i32> %data, i16 %mask) {
 ; CHECK-LABEL: test_mask_load_unaligned_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovdqu32 (%rsi), %zmm0 {%k1}
@@ -429,7 +429,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_mask_load_unaligned_q(i8* %ptr, i8* %ptr2, <8 x i64> %data, i8 %mask) {
 ; CHECK-LABEL: test_mask_load_unaligned_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm0 {%k1}
@@ -447,7 +447,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32> @test_mask_load_aligned_d(<16 x i32> %data, i8* %ptr, i16 %mask) {
 ; CHECK-LABEL: test_mask_load_aligned_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1}
@@ -465,7 +465,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_mask_load_aligned_q(<8 x i64> %data, i8* %ptr, i8 %mask) {
 ; CHECK-LABEL: test_mask_load_aligned_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1}
@@ -483,7 +483,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_vpermil_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilpd {{.*#+}} zmm2 = zmm0[0,1,3,2,5,4,6,6]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,3,2,5,4,6,6]
@@ -503,7 +503,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_vpermil_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm2 = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
@@ -523,7 +523,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpshufd {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
@@ -541,7 +541,7 @@ define <16 x i32>@test_int_x86_avx512_ma
 
 define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_pcmpeq_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -552,7 +552,7 @@ define i16 @test_pcmpeq_d(<16 x i32> %a,
 
 define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_pcmpeq_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -566,7 +566,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpeq
 
 define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_pcmpeq_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    ## kill: %al<def> %al<kill> %eax<kill>
@@ -577,7 +577,7 @@ define i8 @test_pcmpeq_q(<8 x i64> %a, <
 
 define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_pcmpeq_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -591,7 +591,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.
 
 define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_pcmpgt_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -602,7 +602,7 @@ define i16 @test_pcmpgt_d(<16 x i32> %a,
 
 define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_pcmpgt_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -616,7 +616,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt
 
 define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_pcmpgt_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    ## kill: %al<def> %al<kill> %eax<kill>
@@ -627,7 +627,7 @@ define i8 @test_pcmpgt_q(<8 x i64> %a, <
 
 define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_pcmpgt_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -643,7 +643,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_unpckh_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm3 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -659,7 +659,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_unpckh_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vunpckhps {{.*#+}} zmm3 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -675,7 +675,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_unpckl_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -691,7 +691,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_unpckl_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vunpcklps {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -707,7 +707,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_punpcklqd_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
@@ -727,7 +727,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_punpckhqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_punpckhqd_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
@@ -743,7 +743,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_punpckhd_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_punpckhd_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpunpckhdq {{.*#+}} zmm3 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpunpckhdq {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
@@ -759,7 +759,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_punpckld_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_punpckld_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpunpckldq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpunpckldq {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
@@ -773,7 +773,7 @@ define <16 x i32>@test_int_x86_avx512_ma
 
 define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
@@ -782,7 +782,7 @@ define <16 x i32> @test_x86_avx512_pslli
 
 define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_pslli_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -793,7 +793,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_pslli_d(<16 x i32> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_pslli_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -805,7 +805,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_pslli_q(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
@@ -814,7 +814,7 @@ define <8 x i64> @test_x86_avx512_pslli_
 
 define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_pslli_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -825,7 +825,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_pslli_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -837,7 +837,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psrli_d(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrli_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
@@ -846,7 +846,7 @@ define <16 x i32> @test_x86_avx512_psrli
 
 define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrli_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -857,7 +857,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrli_d(<16 x i32> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrli_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -869,7 +869,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psrli_q(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrli_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
@@ -878,7 +878,7 @@ define <8 x i64> @test_x86_avx512_psrli_
 
 define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrli_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -889,7 +889,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrli_q(<8 x i64> %a0, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrli_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -901,7 +901,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psrai_d(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrai_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
@@ -910,7 +910,7 @@ define <16 x i32> @test_x86_avx512_psrai
 
 define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrai_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -921,7 +921,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrai_d(<16 x i32> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrai_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -933,7 +933,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psrai_q(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrai_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
@@ -942,7 +942,7 @@ define <8 x i64> @test_x86_avx512_psrai_
 
 define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrai_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -953,7 +953,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrai_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -967,7 +967,7 @@ declare void @llvm.x86.avx512.storent.q.
 
 define void @test_storent_q_512(<8 x i64> %data, i8* %ptr) {
 ; CHECK-LABEL: test_storent_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
 ; CHECK-NEXT:    retq
   call void @llvm.x86.avx512.storent.q.512(i8* %ptr, <8 x i64> %data)
@@ -978,7 +978,7 @@ declare void @llvm.x86.avx512.storent.pd
 
 define void @test_storent_pd_512(<8 x double> %data, i8* %ptr) {
 ; CHECK-LABEL: test_storent_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
 ; CHECK-NEXT:    retq
   call void @llvm.x86.avx512.storent.pd.512(i8* %ptr, <8 x double> %data)
@@ -989,7 +989,7 @@ declare void @llvm.x86.avx512.storent.ps
 
 define void @test_storent_ps_512(<16 x float> %data, i8* %ptr) {
 ; CHECK-LABEL: test_storent_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
 ; CHECK-NEXT:    retq
   call void @llvm.x86.avx512.storent.ps.512(i8* %ptr, <16 x float> %data)
@@ -998,7 +998,7 @@ define void @test_storent_ps_512(<16 x f
 
 define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_xor_epi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
@@ -1007,7 +1007,7 @@ define <16 x i32> @test_xor_epi32(<16 x
 
 define <16 x i32> @test_mask_xor_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_epi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpxord %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1020,7 +1020,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_or_epi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
@@ -1029,7 +1029,7 @@ define <16 x i32> @test_or_epi32(<16 x i
 
 define <16 x i32> @test_mask_or_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_epi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpord %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1042,7 +1042,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_and_epi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
@@ -1051,7 +1051,7 @@ define <16 x i32> @test_and_epi32(<16 x
 
 define <16 x i32> @test_mask_and_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_epi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpandd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1064,7 +1064,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_xor_epi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
@@ -1073,7 +1073,7 @@ define <8 x i64> @test_xor_epi64(<8 x i6
 
 define <8 x i64> @test_mask_xor_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_epi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1086,7 +1086,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_or_epi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
@@ -1095,7 +1095,7 @@ define <8 x i64> @test_or_epi64(<8 x i64
 
 define <8 x i64> @test_mask_or_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_epi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vporq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1108,7 +1108,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_and_epi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
@@ -1117,7 +1117,7 @@ define <8 x i64> @test_and_epi64(<8 x i6
 
 define <8 x i64> @test_mask_and_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_epi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1130,7 +1130,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mask_add_epi32_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
@@ -1139,7 +1139,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_add_epi32_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1150,7 +1150,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_add_epi32_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1160,7 +1160,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mask_add_epi32_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1170,7 +1170,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_add_epi32_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1182,7 +1182,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_add_epi32_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1193,7 +1193,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
 ; CHECK-LABEL: test_mask_add_epi32_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddd (%rdi){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i32, i32* %ptr_b
@@ -1205,7 +1205,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_add_epi32_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddd (%rdi){1to16}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1219,7 +1219,7 @@ define <16 x i32> @test_mask_add_epi32_r
 
 define <16 x i32> @test_mask_add_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_add_epi32_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1234,7 +1234,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mask_sub_epi32_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
@@ -1243,7 +1243,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi32_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsubd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1254,7 +1254,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi32_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1264,7 +1264,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mask_sub_epi32_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubd (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1274,7 +1274,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi32_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubd (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1286,7 +1286,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi32_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubd (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1297,7 +1297,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
 ; CHECK-LABEL: test_mask_sub_epi32_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubd (%rdi){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i32, i32* %ptr_b
@@ -1309,7 +1309,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi32_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubd (%rdi){1to16}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1323,7 +1323,7 @@ define <16 x i32> @test_mask_sub_epi32_r
 
 define <16 x i32> @test_mask_sub_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi32_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1338,7 +1338,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_mask_add_epi64_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -1347,7 +1347,7 @@ define <8 x i64> @test_mask_add_epi64_rr
 
 define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_epi64_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1358,7 +1358,7 @@ define <8 x i64> @test_mask_add_epi64_rr
 
 define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_epi64_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1368,7 +1368,7 @@ define <8 x i64> @test_mask_add_epi64_rr
 
 define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
 ; CHECK-LABEL: test_mask_add_epi64_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1378,7 +1378,7 @@ define <8 x i64> @test_mask_add_epi64_rm
 
 define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_epi64_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddq (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1390,7 +1390,7 @@ define <8 x i64> @test_mask_add_epi64_rm
 
 define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_epi64_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddq (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1401,7 +1401,7 @@ define <8 x i64> @test_mask_add_epi64_rm
 
 define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mask_add_epi64_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpaddq (%rdi){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i64, i64* %ptr_b
@@ -1413,7 +1413,7 @@ define <8 x i64> @test_mask_add_epi64_rm
 
 define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_epi64_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1427,7 +1427,7 @@ define <8 x i64> @test_mask_add_epi64_rm
 
 define <8 x i64> @test_mask_add_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_epi64_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpaddq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1442,7 +1442,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_mask_sub_epi64_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -1451,7 +1451,7 @@ define <8 x i64> @test_mask_sub_epi64_rr
 
 define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi64_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsubq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1462,7 +1462,7 @@ define <8 x i64> @test_mask_sub_epi64_rr
 
 define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi64_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1472,7 +1472,7 @@ define <8 x i64> @test_mask_sub_epi64_rr
 
 define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
 ; CHECK-LABEL: test_mask_sub_epi64_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1482,7 +1482,7 @@ define <8 x i64> @test_mask_sub_epi64_rm
 
 define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi64_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubq (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1494,7 +1494,7 @@ define <8 x i64> @test_mask_sub_epi64_rm
 
 define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi64_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubq (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1505,7 +1505,7 @@ define <8 x i64> @test_mask_sub_epi64_rm
 
 define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mask_sub_epi64_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsubq (%rdi){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i64, i64* %ptr_b
@@ -1517,7 +1517,7 @@ define <8 x i64> @test_mask_sub_epi64_rm
 
 define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi64_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1531,7 +1531,7 @@ define <8 x i64> @test_mask_sub_epi64_rm
 
 define <8 x i64> @test_mask_sub_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_sub_epi64_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsubq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1546,7 +1546,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rr_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
@@ -1555,7 +1555,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rrk_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmulld %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1566,7 +1566,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rrkz_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmulld %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1576,7 +1576,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rm_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmulld (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1586,7 +1586,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rmk_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmulld (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1598,7 +1598,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rmkz_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmulld (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1609,7 +1609,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rmb_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmulld (%rdi){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i32, i32* %ptr_b
@@ -1621,7 +1621,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rmbk_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmulld (%rdi){1to16}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1635,7 +1635,7 @@ define <16 x i32> @test_mask_mullo_epi32
 
 define <16 x i32> @test_mask_mullo_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi32_rmbkz_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmulld (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1653,7 +1653,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_shuf_f32x4(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f32x4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vshuff32x4 {{.*#+}} zmm3 = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
@@ -1669,7 +1669,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_shuf_f64x2(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f64x2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vshuff64x2 {{.*#+}} zmm3 = zmm0[4,5,2,3],zmm1[2,3,0,1]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
@@ -1690,7 +1690,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_shuf_i32x4(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i32x4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vshufi32x4 {{.*#+}} zmm3 = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
@@ -1706,7 +1706,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i64x2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm0[4,5,2,3],zmm1[2,3,0,1]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
@@ -1722,7 +1722,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_shuf_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vshufpd {{.*#+}} zmm3 = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
@@ -1743,7 +1743,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_shuf_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_shuf_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vshufps {{.*#+}} zmm3 = zmm0[2,1],zmm1[1,0],zmm0[6,5],zmm1[5,4],zmm0[10,9],zmm1[9,8],zmm0[14,13],zmm1[13,12]
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vshufps {{.*#+}} zmm2 {%k1} = zmm0[2,1],zmm1[1,0],zmm0[6,5],zmm1[5,4],zmm0[10,9],zmm1[9,8],zmm0[14,13],zmm1[13,12]
@@ -1759,7 +1759,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmaxs_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm2 {%k1}
@@ -1775,7 +1775,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1791,7 +1791,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmaxu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmaxud %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmaxud %zmm1, %zmm0, %zmm2 {%k1}
@@ -1807,7 +1807,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1823,7 +1823,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmins_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmins_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpminsd %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpminsd %zmm1, %zmm0, %zmm2 {%k1}
@@ -1839,7 +1839,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmins_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmins_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpminsq %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1855,7 +1855,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pminu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pminu_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpminud %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpminud %zmm1, %zmm0, %zmm2 {%k1}
@@ -1871,7 +1871,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pminu_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpminuq %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpminuq %zmm1, %zmm0, %zmm2 {%k1}
@@ -1885,7 +1885,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
 ; CHECK-LABEL: test_mm_mask_move_ss:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovss %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1897,7 +1897,7 @@ entry:
 
 define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
 ; CHECK-LABEL: test_mm_maskz_move_ss:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1908,7 +1908,7 @@ entry:
 
 define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
 ; CHECK-LABEL: test_mm_mask_move_sd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovsd %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -1919,7 +1919,7 @@ entry:
 
 define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
 ; CHECK-LABEL: test_mm_maskz_move_sd:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1935,7 +1935,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmovzxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovzxbd {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -1955,7 +1955,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmovzxb_q_512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovzxbq {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovzxbq {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
@@ -1975,7 +1975,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmovzxd_q_512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxd_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovzxdq {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
@@ -1995,7 +1995,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmovzxw_d_512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovzxwd {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
@@ -2015,7 +2015,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmovzxw_q_512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovzxwq {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovzxwq {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -2035,7 +2035,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmovsxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovsxbd %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsxbd %xmm0, %zmm1 {%k1}
@@ -2055,7 +2055,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmovsxb_q_512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovsxbq %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsxbq %xmm0, %zmm1 {%k1}
@@ -2075,7 +2075,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmovsxd_q_512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovsxdq %ymm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsxdq %ymm0, %zmm1 {%k1}
@@ -2096,7 +2096,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pmovsxw_d_512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovsxwd %ymm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsxwd %ymm0, %zmm1 {%k1}
@@ -2117,7 +2117,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pmovsxw_q_512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmovsxwq %xmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsxwq %xmm0, %zmm1 {%k1}
@@ -2137,7 +2137,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlq $4, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsrlq $4, %zmm0, %zmm1 {%k1}
@@ -2157,7 +2157,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrld $4, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsrld $4, %zmm0, %zmm1 {%k1}
@@ -2177,7 +2177,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_psra_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_psra_di_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrad $3, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsrad $3, %zmm0, %zmm1 {%k1}
@@ -2197,7 +2197,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_psra_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsraq $3, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsraq $3, %zmm0, %zmm1 {%k1}
@@ -2217,7 +2217,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_psll_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpslld $3, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpslld $3, %zmm0, %zmm1 {%k1}
@@ -2237,7 +2237,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_psll_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_psll_qi_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllq $3, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpsllq $3, %zmm0, %zmm1 {%k1}
@@ -2255,7 +2255,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psll_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2264,7 +2264,7 @@ define <16 x i32> @test_x86_avx512_psll_
 
 define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psll_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2275,7 +2275,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psll_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2287,7 +2287,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psll_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2296,7 +2296,7 @@ define <8 x i64> @test_x86_avx512_psll_q
 
 define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psll_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2307,7 +2307,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psll_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2319,7 +2319,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrl_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2328,7 +2328,7 @@ define <16 x i32> @test_x86_avx512_psrl_
 
 define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrl_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2339,7 +2339,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrl_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2351,7 +2351,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrl_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2360,7 +2360,7 @@ define <8 x i64> @test_x86_avx512_psrl_q
 
 define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrl_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2371,7 +2371,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrl_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2383,7 +2383,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psra_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2392,7 +2392,7 @@ define <16 x i32> @test_x86_avx512_psra_
 
 define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psra_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2403,7 +2403,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psra_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2415,7 +2415,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psra_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2424,7 +2424,7 @@ define <8 x i64> @test_x86_avx512_psra_q
 
 define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psra_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2435,7 +2435,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psra_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2447,7 +2447,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psllv_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2456,7 +2456,7 @@ define <16 x i32> @test_x86_avx512_psllv
 
 define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psllv_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2467,7 +2467,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psllv_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2479,7 +2479,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psllv_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2488,7 +2488,7 @@ define <8 x i64> @test_x86_avx512_psllv_
 
 define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psllv_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2499,7 +2499,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psllv_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2512,7 +2512,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrav_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2521,7 +2521,7 @@ define <16 x i32> @test_x86_avx512_psrav
 
 define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrav_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2532,7 +2532,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrav_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2544,7 +2544,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrav_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2553,7 +2553,7 @@ define <8 x i64> @test_x86_avx512_psrav_
 
 define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrav_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2564,7 +2564,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrav_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2576,7 +2576,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
@@ -2585,7 +2585,7 @@ define <16 x i32> @test_x86_avx512_psrlv
 
 define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrlv_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2596,7 +2596,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2608,7 +2608,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
@@ -2617,7 +2617,7 @@ define <8 x i64> @test_x86_avx512_psrlv_
 
 define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrlv_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2628,7 +2628,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2640,7 +2640,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_q_memop:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlvq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <8 x i64>, <8 x i64>* %ptr
@@ -2652,7 +2652,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_cvt_dq2pd_512(<8 x i32> %x0, <8 x double> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtdq2pd %ymm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtdq2pd %ymm0, %zmm1 {%k1}
@@ -2668,7 +2668,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_cvt_udq2pd_512(<8 x i32> %x0, <8 x double> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtudq2pd %ymm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtudq2pd %ymm0, %zmm1 {%k1}
@@ -2682,7 +2682,7 @@ define <8 x double>@test_int_x86_avx512_
 
 define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_valign_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    valignq {{.*#+}} zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
@@ -2691,7 +2691,7 @@ define <8 x i64> @test_valign_q(<8 x i64
 
 define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) {
 ; CHECK-LABEL: test_mask_valign_q:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    valignq {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7],zmm0[0,1]
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2704,7 +2704,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
 ; CHECK-LABEL: test_maskz_valign_d:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    valignd {{.*#+}} zmm0 {%k1} {z} = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4]
 ; CHECK-NEXT:    retq
@@ -2718,7 +2718,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
@@ -2738,7 +2738,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm2 {%k1}
@@ -2757,7 +2757,7 @@ define <16 x float>@test_int_x86_avx512_
 ; Test case to make sure we can print shuffle decode comments for constant pool loads.
 define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
@@ -2775,7 +2775,7 @@ define <16 x float>@test_int_x86_avx512_
 
 define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mask_mul_epi32_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuldq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -2784,7 +2784,7 @@ define <8 x i64> @test_mask_mul_epi32_rr
 
 define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epi32_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2795,7 +2795,7 @@ define <8 x i64> @test_mask_mul_epi32_rr
 
 define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epi32_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2805,7 +2805,7 @@ define <8 x i64> @test_mask_mul_epi32_rr
 
 define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mask_mul_epi32_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuldq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -2815,7 +2815,7 @@ define <8 x i64> @test_mask_mul_epi32_rm
 
 define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epi32_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -2827,7 +2827,7 @@ define <8 x i64> @test_mask_mul_epi32_rm
 
 define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epi32_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2838,7 +2838,7 @@ define <8 x i64> @test_mask_mul_epi32_rm
 
 define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mask_mul_epi32_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i64, i64* %ptr_b
@@ -2851,7 +2851,7 @@ define <8 x i64> @test_mask_mul_epi32_rm
 
 define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epi32_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -2866,7 +2866,7 @@ define <8 x i64> @test_mask_mul_epi32_rm
 
 define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epi32_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2882,7 +2882,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mask_mul_epu32_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuludq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -2891,7 +2891,7 @@ define <8 x i64> @test_mask_mul_epu32_rr
 
 define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epu32_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -2902,7 +2902,7 @@ define <8 x i64> @test_mask_mul_epu32_rr
 
 define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epu32_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2912,7 +2912,7 @@ define <8 x i64> @test_mask_mul_epu32_rr
 
 define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mask_mul_epu32_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuludq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -2922,7 +2922,7 @@ define <8 x i64> @test_mask_mul_epu32_rm
 
 define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epu32_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -2934,7 +2934,7 @@ define <8 x i64> @test_mask_mul_epu32_rm
 
 define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epu32_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2945,7 +2945,7 @@ define <8 x i64> @test_mask_mul_epu32_rm
 
 define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mask_mul_epu32_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuludq (%rdi){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i64, i64* %ptr_b
@@ -2958,7 +2958,7 @@ define <8 x i64> @test_mask_mul_epu32_rm
 
 define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epu32_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -2973,7 +2973,7 @@ define <8 x i64> @test_mask_mul_epu32_rm
 
 define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mul_epu32_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2989,7 +2989,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: test_mask_vextractf32x4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k0
 ; CHECK-NEXT:    kshiftlw $12, %k0, %k1
 ; CHECK-NEXT:    kshiftrw $15, %k1, %k1
@@ -3019,7 +3019,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: test_mask_vextracti64x4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf64x4 $1, %zmm1, %ymm1
 ; CHECK-NEXT:    kmovw %edi, %k0
 ; CHECK-NEXT:    kshiftlw $12, %k0, %k1
@@ -3050,7 +3050,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.
 
 define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: test_maskz_vextracti32x4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k0
 ; CHECK-NEXT:    kshiftlw $12, %k0, %k1
 ; CHECK-NEXT:    kshiftrw $15, %k1, %k1
@@ -3081,7 +3081,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.
 
 define <4 x double> @test_vextractf64x4(<8 x double> %a) {
 ; CHECK-LABEL: test_vextractf64x4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double> %a, i32 1, <4 x double> zeroinitializer, i8 -1)
@@ -3094,7 +3094,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; CHECK-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -3115,7 +3115,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; CHECK-NEXT:    vinserti32x4 $1, %xmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -3136,7 +3136,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -3156,7 +3156,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -3174,7 +3174,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define <8 x i64> @test_x86_avx512_movntdqa(i8* %a0) {
 ; CHECK-LABEL: test_x86_avx512_movntdqa:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovntdqa (%rdi), %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %a0)
@@ -3185,7 +3185,7 @@ declare <8 x i64> @llvm.x86.avx512.movnt
 
 define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_cmp_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; CHECK-NEXT:    vpcmpled %zmm1, %zmm0, %k2
@@ -3230,7 +3230,7 @@ define <8 x i16> @test_cmp_d_512(<16 x i
 
 define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mask_cmp_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    vpcmpgtd %zmm0, %zmm1, %k2 {%k1}
@@ -3278,7 +3278,7 @@ declare i16 @llvm.x86.avx512.mask.cmp.d.
 
 define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_ucmp_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    vpcmpltud %zmm1, %zmm0, %k1
 ; CHECK-NEXT:    vpcmpleud %zmm1, %zmm0, %k2
@@ -3323,7 +3323,7 @@ define <8 x i16> @test_ucmp_d_512(<16 x
 
 define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mask_ucmp_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    vpcmpltud %zmm1, %zmm0, %k2 {%k1}
@@ -3371,7 +3371,7 @@ declare i16 @llvm.x86.avx512.mask.ucmp.d
 
 define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_cmp_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
 ; CHECK-NEXT:    vpcmpleq %zmm1, %zmm0, %k2
@@ -3416,7 +3416,7 @@ define <8 x i8> @test_cmp_q_512(<8 x i64
 
 define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_mask_cmp_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    vpcmpgtq %zmm0, %zmm1, %k2 {%k1}
@@ -3464,7 +3464,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.5
 
 define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_ucmp_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    vpcmpltuq %zmm1, %zmm0, %k1
 ; CHECK-NEXT:    vpcmpleuq %zmm1, %zmm0, %k2
@@ -3509,7 +3509,7 @@ define <8 x i8> @test_ucmp_q_512(<8 x i6
 
 define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_mask_ucmp_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
 ; CHECK-NEXT:    vpcmpltuq %zmm1, %zmm0, %k2 {%k1}
@@ -3559,7 +3559,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
@@ -3580,7 +3580,7 @@ define <16 x float>@test_int_x86_avx512_
 
 define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512_load(<4 x float>* %x0ptr, <16 x float> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -3593,7 +3593,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -3613,7 +3613,7 @@ define <8 x double>@test_int_x86_avx512_
 
 define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512_load(<4 x double>* %x0ptr, <8 x double> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -3627,7 +3627,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
@@ -3648,7 +3648,7 @@ define <16 x i32>@test_int_x86_avx512_ma
 
 define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512_load(<4 x i32>* %x0ptr, <16 x i32> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -3662,7 +3662,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -3682,7 +3682,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512_load(<4 x i64>* %x0ptr, <8 x i64> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    retq
@@ -3696,7 +3696,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pabs_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pabs_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpabsd %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpabsd %zmm0, %zmm1 {%k1}
@@ -3712,7 +3712,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pabs_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpabsq %zmm0, %zmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpabsq %zmm0, %zmm1 {%k1}
@@ -3726,7 +3726,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) {
 ; CHECK-LABEL: test_vptestmq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vptestmq %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %ecx
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -3744,7 +3744,7 @@ declare i8 @llvm.x86.avx512.ptestm.q.512
 
 define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) {
 ; CHECK-LABEL: test_vptestmd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vptestmd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %ecx
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -3764,7 +3764,7 @@ declare i16 @llvm.x86.avx512.ptestnm.d.5
 
 define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vptestnmd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vptestnmd %zmm1, %zmm0, %k1 {%k1}
@@ -3783,7 +3783,7 @@ declare i8 @llvm.x86.avx512.ptestnm.q.51
 
 define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_ptestnm_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vptestnmq %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vptestnmq %zmm1, %zmm0, %k1 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 declare i32 @llvm.x86.avx512.kortestz.w(i16, i16) nounwind readnone
 define i32 @test_kortestz(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kortestz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    xorl %eax, %eax
@@ -18,7 +18,7 @@ define i32 @test_kortestz(i16 %a0, i16 %
 declare i32 @llvm.x86.avx512.kortestc.w(i16, i16) nounwind readnone
 define i32 @test_kortestc(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kortestc:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    xorl %eax, %eax
@@ -32,7 +32,7 @@ define i32 @test_kortestc(i16 %a0, i16 %
 declare i16 @llvm.x86.avx512.kand.w(i16, i16) nounwind readnone
 define i16 @test_kand(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kand:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    movw $8, %ax
@@ -50,7 +50,7 @@ define i16 @test_kand(i16 %a0, i16 %a1)
 declare i16 @llvm.x86.avx512.kandn.w(i16, i16) nounwind readnone
 define i16 @test_kandn(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kandn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    movw $8, %ax
@@ -68,7 +68,7 @@ define i16 @test_kandn(i16 %a0, i16 %a1)
 declare i16 @llvm.x86.avx512.knot.w(i16) nounwind readnone
 define i16 @test_knot(i16 %a0) {
 ; CHECK-LABEL: test_knot:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k0
 ; CHECK-NEXT:    knotw %k0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -81,7 +81,7 @@ define i16 @test_knot(i16 %a0) {
 declare i16 @llvm.x86.avx512.kor.w(i16, i16) nounwind readnone
 define i16 @test_kor(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kor:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    movw $8, %ax
@@ -100,7 +100,7 @@ declare i16 @llvm.x86.avx512.kunpck.bw(i
 
 define i16 @unpckbw_test(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: unpckbw_test:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k0
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    kunpckbw %k1, %k0, %k0
@@ -116,7 +116,7 @@ declare i16 @llvm.x86.avx512.kxnor.w(i16
 ; probably by FoldConstantArithmetic in SelectionDAG.
 define i16 @test_kxnor(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kxnor:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    movw $8, %ax
@@ -134,7 +134,7 @@ define i16 @test_kxnor(i16 %a0, i16 %a1)
 declare i16 @llvm.x86.avx512.kxor.w(i16, i16) nounwind readnone
 define i16 @test_kxor(i16 %a0, i16 %a1) {
 ; CHECK-LABEL: test_kxor:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k0
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    movw $8, %ax
@@ -151,7 +151,7 @@ define i16 @test_kxor(i16 %a0, i16 %a1)
 
 define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_rcp_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrcp14ps %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
@@ -161,7 +161,7 @@ declare <16 x float> @llvm.x86.avx512.rc
 
 define <8 x double> @test_rcp_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_rcp_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrcp14pd %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1) ; <<8 x double>> [#uses=1]
@@ -173,7 +173,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b) {
 ; CHECK-LABEL: test_rndscale_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 11, i32 4)
@@ -182,7 +182,7 @@ define <2 x double> @test_rndscale_sd(<2
 
 define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
 ; CHECK-LABEL: test_rndscale_sd_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -193,7 +193,7 @@ define <2 x double> @test_rndscale_sd_ma
 
 define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, <2 x double>* %bptr, <2 x double> %c, i8 %mask) {
 ; CHECK-LABEL: test_rndscale_sd_mask_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vrndscalesd $11, (%rdi), %xmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -205,7 +205,7 @@ define <2 x double> @test_rndscale_sd_ma
 
 define <2 x double> @test_rndscale_sd_maskz(<2 x double> %a, <2 x double> %b, i8 %mask) {
 ; CHECK-LABEL: test_rndscale_sd_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -217,7 +217,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: test_rndscale_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 11, i32 4)
@@ -226,7 +226,7 @@ define <4 x float> @test_rndscale_ss(<4
 
 define <4 x float> @test_rndscale_ss_load(<4 x float> %a, <4 x float>* %bptr) {
 ; CHECK-LABEL: test_rndscale_ss_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrndscaless $11, (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %b = load <4 x float>, <4 x float>* %bptr
@@ -236,7 +236,7 @@ define <4 x float> @test_rndscale_ss_loa
 
 define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
 ; CHECK-LABEL: test_rndscale_ss_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -247,7 +247,7 @@ define <4 x float> @test_rndscale_ss_mas
 
 define <4 x float> @test_rndscale_ss_maskz(<4 x float> %a, <4 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_rndscale_ss_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -259,7 +259,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double> @test7(<8 x double> %a) {
 ; CHECK-LABEL: test7:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrndscalepd $11, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4)
@@ -270,7 +270,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test8(<16 x float> %a) {
 ; CHECK-LABEL: test8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrndscaleps $11, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4)
@@ -279,7 +279,7 @@ define <16 x float> @test8(<16 x float>
 
 define <16 x float> @test_rsqrt_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_rsqrt_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrsqrt14ps %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
@@ -289,7 +289,7 @@ declare <16 x float> @llvm.x86.avx512.rs
 
 define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_sqrt_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsqrtpd %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double> %a0,  <8 x double> zeroinitializer, i8 -1, i32 4)
@@ -299,7 +299,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_sqrt_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsqrtps %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
@@ -307,7 +307,7 @@ define <16 x float> @test_sqrt_ps_512(<1
 }
 define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_sqrt_round_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 3)
@@ -317,7 +317,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_getexp_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetexppd %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0,  <8 x double> zeroinitializer, i8 -1, i32 4)
@@ -325,7 +325,7 @@ define <8 x double> @test_getexp_pd_512(
 }
 define <8 x double> @test_getexp_round_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_getexp_round_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetexppd {sae}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0,  <8 x double> zeroinitializer, i8 -1, i32 8)
@@ -335,7 +335,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_getexp_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetexpps %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
@@ -344,7 +344,7 @@ define <16 x float> @test_getexp_ps_512(
 
 define <16 x float> @test_getexp_round_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_getexp_round_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetexpps {sae}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -356,7 +356,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_sqrt_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
 ; CHECK-NEXT:    vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
@@ -382,7 +382,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_sqrt_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
 ; CHECK-NEXT:    vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
@@ -406,7 +406,7 @@ define <2 x double> @test_sqrt_sd(<2 x d
 
 define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
 ; CHECK-NEXT:    retq
   %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
@@ -416,7 +416,7 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2
 
 define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
 ; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
@@ -426,7 +426,7 @@ declare <2 x double> @llvm.x86.sse2.cvts
 
 define i64 @test_x86_avx512_cvttsd2si64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttsd2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttsd2si %xmm0, %rcx
 ; CHECK-NEXT:    vcvttsd2si {sae}, %xmm0, %rax
 ; CHECK-NEXT:    addq %rcx, %rax
@@ -440,7 +440,7 @@ declare i64 @llvm.x86.avx512.cvttsd2si64
 
 define i32 @test_x86_avx512_cvttsd2usi(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttsd2usi:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttsd2usi %xmm0, %ecx
 ; CHECK-NEXT:    vcvttsd2usi {sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
@@ -454,7 +454,7 @@ declare i32 @llvm.x86.avx512.cvttsd2usi(
 
 define i32 @test_x86_avx512_cvttsd2si(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttsd2si:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttsd2si %xmm0, %ecx
 ; CHECK-NEXT:    vcvttsd2si {sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
@@ -470,7 +470,7 @@ declare i32 @llvm.x86.avx512.cvttsd2si(<
 
 define i64 @test_x86_avx512_cvttsd2usi64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttsd2usi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttsd2usi %xmm0, %rcx
 ; CHECK-NEXT:    vcvttsd2usi {sae}, %xmm0, %rax
 ; CHECK-NEXT:    addq %rcx, %rax
@@ -484,7 +484,7 @@ declare i64 @llvm.x86.avx512.cvttsd2usi6
 
 define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_sse_cvtss2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtss2si %xmm0, %rax
 ; CHECK-NEXT:    retq
   %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
@@ -495,7 +495,7 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4
 
 define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
 ; CHECK-LABEL: test_x86_sse_cvtsi642ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
@@ -506,7 +506,7 @@ declare <4 x float> @llvm.x86.sse.cvtsi6
 
 define i32 @test_x86_avx512_cvttss2si(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttss2si:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttss2si {sae}, %xmm0, %ecx
 ; CHECK-NEXT:    vcvttss2si %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
@@ -520,7 +520,7 @@ declare i32 @llvm.x86.avx512.cvttss2si(<
 
 define i64 @test_x86_avx512_cvttss2si64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttss2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttss2si %xmm0, %rcx
 ; CHECK-NEXT:    vcvttss2si {sae}, %xmm0, %rax
 ; CHECK-NEXT:    addq %rcx, %rax
@@ -534,7 +534,7 @@ declare i64 @llvm.x86.avx512.cvttss2si64
 
 define i32 @test_x86_avx512_cvttss2usi(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttss2usi:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttss2usi {sae}, %xmm0, %ecx
 ; CHECK-NEXT:    vcvttss2usi %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
@@ -548,7 +548,7 @@ declare i32 @llvm.x86.avx512.cvttss2usi(
 
 define i64 @test_x86_avx512_cvttss2usi64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttss2usi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvttss2usi %xmm0, %rcx
 ; CHECK-NEXT:    vcvttss2usi {sae}, %xmm0, %rax
 ; CHECK-NEXT:    addq %rcx, %rax
@@ -562,7 +562,7 @@ declare i64 @llvm.x86.avx512.cvttss2usi6
 
 define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtsd2usi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsd2usi %xmm0, %rax
 ; CHECK-NEXT:    vcvtsd2usi {rz-sae}, %xmm0, %rcx
 ; CHECK-NEXT:    addq %rax, %rcx
@@ -581,7 +581,7 @@ declare i64 @llvm.x86.avx512.vcvtsd2usi6
 
 define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtsd2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
 ; CHECK-NEXT:    vcvtsd2si {rz-sae}, %xmm0, %rcx
 ; CHECK-NEXT:    addq %rax, %rcx
@@ -600,7 +600,7 @@ declare i64 @llvm.x86.avx512.vcvtsd2si64
 
 define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtss2usi64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtss2usi %xmm0, %rax
 ; CHECK-NEXT:    vcvtss2usi {rz-sae}, %xmm0, %rcx
 ; CHECK-NEXT:    addq %rax, %rcx
@@ -619,7 +619,7 @@ declare i64 @llvm.x86.avx512.vcvtss2usi6
 
 define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtss2si64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtss2si %xmm0, %rax
 ; CHECK-NEXT:    vcvtss2si {rz-sae}, %xmm0, %rcx
 ; CHECK-NEXT:    addq %rax, %rcx
@@ -638,7 +638,7 @@ declare i64 @llvm.x86.avx512.vcvtss2si64
 
 define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtsd2usi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsd2usi %xmm0, %eax
 ; CHECK-NEXT:    vcvtsd2usi {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
@@ -657,7 +657,7 @@ declare i32 @llvm.x86.avx512.vcvtsd2usi3
 
 define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtsd2si32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsd2si %xmm0, %eax
 ; CHECK-NEXT:    vcvtsd2si {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
@@ -676,7 +676,7 @@ declare i32 @llvm.x86.avx512.vcvtsd2si32
 
 define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtss2usi32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtss2usi %xmm0, %eax
 ; CHECK-NEXT:    vcvtss2usi {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
@@ -695,7 +695,7 @@ declare i32 @llvm.x86.avx512.vcvtss2usi3
 
 define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtss2si32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtss2si %xmm0, %eax
 ; CHECK-NEXT:    vcvtss2si {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
@@ -714,7 +714,7 @@ declare i32 @llvm.x86.avx512.vcvtss2si32
 
 define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
@@ -723,7 +723,7 @@ define <16 x float> @test_x86_vcvtph2ps_
 
 define <16 x float> @test_x86_vcvtph2ps_512_sae(<16 x i16> %a0) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtph2ps {sae}, %ymm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
@@ -732,7 +732,7 @@ define <16 x float> @test_x86_vcvtph2ps_
 
 define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0,<16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm0
@@ -743,7 +743,7 @@ define <16 x float> @test_x86_vcvtph2ps_
 
 define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -753,7 +753,7 @@ define <16 x float> @test_x86_vcvtph2ps_
 
 define <16 x float> @test_x86_vcvtph2ps_512_rrkz(<16 x i16> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -765,7 +765,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 %mask, <16 x i16> * %dst) {
 ; CHECK-LABEL: test_x86_vcvtps2ph_256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
@@ -785,7 +785,7 @@ declare <16 x i16> @llvm.x86.avx512.mask
 
 define <16 x float> @test_x86_vbroadcast_ss_512(i8* %a0) {
 ; CHECK-LABEL: test_x86_vbroadcast_ss_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastss (%rdi), %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.vbroadcast.ss.512(i8* %a0) ; <<16 x float>> [#uses=1]
@@ -795,7 +795,7 @@ declare <16 x float> @llvm.x86.avx512.vb
 
 define <8 x double> @test_x86_vbroadcast_sd_512(i8* %a0) {
 ; CHECK-LABEL: test_x86_vbroadcast_sd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vbroadcastsd (%rdi), %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8* %a0) ; <<8 x double>> [#uses=1]
@@ -805,7 +805,7 @@ declare <8 x double> @llvm.x86.avx512.vb
 
  define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
 ; CHECK-LABEL: test_cmpps:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpleps {sae}, %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -817,7 +817,7 @@ declare <8 x double> @llvm.x86.avx512.vb
 
  define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
 ; CHECK-LABEL: test_cmppd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpneqpd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    ## kill: %al<def> %al<kill> %eax<kill>
@@ -830,7 +830,7 @@ declare <8 x double> @llvm.x86.avx512.vb
  ; fp min - max
 define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) {
 ; CHECK-LABEL: test_vmaxpd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %a0, <8 x double> %a1,
@@ -842,7 +842,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) {
 ; CHECK-LABEL: test_vminpd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vminpd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %a0, <8 x double> %a1,
@@ -854,7 +854,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define void @test_mask_store_ss(i8* %ptr, <4 x float> %data, i8 %mask) {
 ; CHECK-LABEL: test_mask_store_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi) {%k1}
 ; CHECK-NEXT:    retq
@@ -870,7 +870,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -880,7 +880,7 @@ define <16 x float> @test_vsubps_rn(<16
 
 define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -890,7 +890,7 @@ define <16 x float> @test_vsubps_rd(<16
 
 define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -900,7 +900,7 @@ define <16 x float> @test_vsubps_ru(<16
 
 define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -910,7 +910,7 @@ define <16 x float> @test_vsubps_rz(<16
 
 define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -920,7 +920,7 @@ define <16 x float> @test_vmulps_rn(<16
 
 define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -930,7 +930,7 @@ define <16 x float> @test_vmulps_rd(<16
 
 define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -940,7 +940,7 @@ define <16 x float> @test_vmulps_ru(<16
 
 define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
@@ -951,7 +951,7 @@ define <16 x float> @test_vmulps_rz(<16
 ;; mask float
 define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -962,7 +962,7 @@ define <16 x float> @test_vmulps_mask_rn
 
 define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -973,7 +973,7 @@ define <16 x float> @test_vmulps_mask_rd
 
 define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -984,7 +984,7 @@ define <16 x float> @test_vmulps_mask_ru
 
 define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -996,7 +996,7 @@ define <16 x float> @test_vmulps_mask_rz
 ;; With Passthru value
 define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_passthru_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1008,7 +1008,7 @@ define <16 x float> @test_vmulps_mask_pa
 
 define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_passthru_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1020,7 +1020,7 @@ define <16 x float> @test_vmulps_mask_pa
 
 define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_passthru_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1032,7 +1032,7 @@ define <16 x float> @test_vmulps_mask_pa
 
 define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_vmulps_mask_passthru_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1045,7 +1045,7 @@ define <16 x float> @test_vmulps_mask_pa
 ;; mask double
 define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_vmulpd_mask_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1056,7 +1056,7 @@ define <8 x double> @test_vmulpd_mask_rn
 
 define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_vmulpd_mask_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1067,7 +1067,7 @@ define <8 x double> @test_vmulpd_mask_rd
 
 define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_vmulpd_mask_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1078,7 +1078,7 @@ define <8 x double> @test_vmulpd_mask_ru
 
 define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_vmulpd_mask_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1089,7 +1089,7 @@ define <8 x double> @test_vmulpd_mask_rz
 
 define <8 x i64> @test_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mul_epi32_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuldq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> %a, <16 x i32> %b)
@@ -1098,7 +1098,7 @@ define <8 x i64> @test_mul_epi32_rr(<16
 
 define <8 x i64> @test_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mul_epi32_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1111,7 +1111,7 @@ define <8 x i64> @test_mul_epi32_rrk(<16
 
 define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: test_mul_epi32_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1123,7 +1123,7 @@ define <8 x i64> @test_mul_epi32_rrkz(<1
 
 define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mul_epi32_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuldq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1133,7 +1133,7 @@ define <8 x i64> @test_mul_epi32_rm(<16
 
 define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mul_epi32_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1147,7 +1147,7 @@ define <8 x i64> @test_mul_epi32_rmk(<16
 
 define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mul_epi32_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1160,7 +1160,7 @@ define <8 x i64> @test_mul_epi32_rmkz(<1
 
 define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mul_epi32_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i64, i64* %ptr_b
@@ -1173,7 +1173,7 @@ define <8 x i64> @test_mul_epi32_rmb(<16
 
 define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mul_epi32_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1190,7 +1190,7 @@ define <8 x i64> @test_mul_epi32_rmbk(<1
 
 define <8 x i64> @test_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mul_epi32_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1208,7 +1208,7 @@ declare <8 x i64> @llvm.x86.avx512.pmul.
 
 define <8 x i64> @test_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mul_epu32_rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuludq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b)
@@ -1217,7 +1217,7 @@ define <8 x i64> @test_mul_epu32_rr(<16
 
 define <8 x i64> @test_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mul_epu32_rrk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -1230,7 +1230,7 @@ define <8 x i64> @test_mul_epu32_rrk(<16
 
 define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
 ; CHECK-LABEL: test_mul_epu32_rrkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1242,7 +1242,7 @@ define <8 x i64> @test_mul_epu32_rrkz(<1
 
 define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
 ; CHECK-LABEL: test_mul_epu32_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuludq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %b = load <16 x i32>, <16 x i32>* %ptr_b
@@ -1252,7 +1252,7 @@ define <8 x i64> @test_mul_epu32_rm(<16
 
 define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mul_epu32_rmk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1266,7 +1266,7 @@ define <8 x i64> @test_mul_epu32_rmk(<16
 
 define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mul_epu32_rmkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1279,7 +1279,7 @@ define <8 x i64> @test_mul_epu32_rmkz(<1
 
 define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mul_epu32_rmb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpmuludq (%rdi){1to8}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %q = load i64, i64* %ptr_b
@@ -1292,7 +1292,7 @@ define <8 x i64> @test_mul_epu32_rmb(<16
 
 define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mul_epu32_rmbk:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1309,7 +1309,7 @@ define <8 x i64> @test_mul_epu32_rmbk(<1
 
 define <8 x i64> @test_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mul_epu32_rmbkz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1327,7 +1327,7 @@ declare <8 x i64> @llvm.x86.avx512.pmulu
 
 define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1336,7 +1336,7 @@ define <16 x float> @test_mm512_maskz_ad
 }
 define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1345,7 +1345,7 @@ define <16 x float> @test_mm512_maskz_ad
 }
 define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1355,7 +1355,7 @@ define <16 x float> @test_mm512_maskz_ad
 
 define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1366,7 +1366,7 @@ define <16 x float> @test_mm512_maskz_ad
 
 define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_add_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1376,7 +1376,7 @@ define <16 x float> @test_mm512_maskz_ad
 
 define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_add_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1386,7 +1386,7 @@ define <16 x float> @test_mm512_mask_add
 }
 define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_add_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1396,7 +1396,7 @@ define <16 x float> @test_mm512_mask_add
 }
 define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_add_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1407,7 +1407,7 @@ define <16 x float> @test_mm512_mask_add
 
 define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_add_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1419,7 +1419,7 @@ define <16 x float> @test_mm512_mask_add
 
 define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_add_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1431,7 +1431,7 @@ define <16 x float> @test_mm512_mask_add
 
 define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
@@ -1439,7 +1439,7 @@ define <16 x float> @test_mm512_add_roun
 }
 define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
@@ -1447,7 +1447,7 @@ define <16 x float> @test_mm512_add_roun
 }
 define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
@@ -1456,7 +1456,7 @@ define <16 x float> @test_mm512_add_roun
 
 define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
@@ -1465,7 +1465,7 @@ define <16 x float> @test_mm512_add_roun
 
 define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1475,7 +1475,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1485,7 +1485,7 @@ define <16 x float> @test_mm512_mask_sub
 }
 define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1495,7 +1495,7 @@ define <16 x float> @test_mm512_mask_sub
 }
 define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1506,7 +1506,7 @@ define <16 x float> @test_mm512_mask_sub
 
 define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1518,7 +1518,7 @@ define <16 x float> @test_mm512_mask_sub
 
 define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_sub_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vsubps %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1529,7 +1529,7 @@ define <16 x float> @test_mm512_mask_sub
 
 define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
@@ -1537,7 +1537,7 @@ define <16 x float> @test_mm512_sub_roun
 }
 define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
@@ -1545,7 +1545,7 @@ define <16 x float> @test_mm512_sub_roun
 }
 define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
@@ -1554,7 +1554,7 @@ define <16 x float> @test_mm512_sub_roun
 
 define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
@@ -1563,7 +1563,7 @@ define <16 x float> @test_mm512_sub_roun
 
 define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vsubps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1572,7 +1572,7 @@ define <16 x float> @test_mm512_sub_roun
 
 define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1581,7 +1581,7 @@ define <16 x float> @test_mm512_maskz_di
 }
 define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1590,7 +1590,7 @@ define <16 x float> @test_mm512_maskz_di
 }
 define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1600,7 +1600,7 @@ define <16 x float> @test_mm512_maskz_di
 
 define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1611,7 +1611,7 @@ define <16 x float> @test_mm512_maskz_di
 
 define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_div_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1621,7 +1621,7 @@ define <16 x float> @test_mm512_maskz_di
 
 define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_div_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1631,7 +1631,7 @@ define <16 x float> @test_mm512_mask_div
 }
 define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_div_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1641,7 +1641,7 @@ define <16 x float> @test_mm512_mask_div
 }
 define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_div_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1652,7 +1652,7 @@ define <16 x float> @test_mm512_mask_div
 
 define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_div_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1664,7 +1664,7 @@ define <16 x float> @test_mm512_mask_div
 
 define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_div_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1676,7 +1676,7 @@ define <16 x float> @test_mm512_mask_div
 
 define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_rn_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 0)
@@ -1684,7 +1684,7 @@ define <16 x float> @test_mm512_div_roun
 }
 define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_rd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 1)
@@ -1692,7 +1692,7 @@ define <16 x float> @test_mm512_div_roun
 }
 define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_ru_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 2)
@@ -1701,7 +1701,7 @@ define <16 x float> @test_mm512_div_roun
 
 define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_rz_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 3)
@@ -1710,7 +1710,7 @@ define <16 x float> @test_mm512_div_roun
 
 define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1720,7 +1720,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_min_round_ps_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1730,7 +1730,7 @@ define <16 x float> @test_mm512_maskz_mi
 
 define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_min_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1740,7 +1740,7 @@ define <16 x float> @test_mm512_maskz_mi
 
 define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_min_round_ps_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1751,7 +1751,7 @@ define <16 x float> @test_mm512_mask_min
 
 define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_min_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vminps %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1762,7 +1762,7 @@ define <16 x float> @test_mm512_mask_min
 
 define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_min_round_ps_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 8)
@@ -1771,7 +1771,7 @@ define <16 x float> @test_mm512_min_roun
 
 define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_min_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vminps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1781,7 +1781,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_max_round_ps_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1791,7 +1791,7 @@ define <16 x float> @test_mm512_maskz_ma
 
 define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_max_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1801,7 +1801,7 @@ define <16 x float> @test_mm512_maskz_ma
 
 define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_max_round_ps_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1812,7 +1812,7 @@ define <16 x float> @test_mm512_mask_max
 
 define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
 ; CHECK-LABEL: test_mm512_mask_max_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxps %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -1823,7 +1823,7 @@ define <16 x float> @test_mm512_mask_max
 
 define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_max_round_ps_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 8)
@@ -1832,7 +1832,7 @@ define <16 x float> @test_mm512_max_roun
 
 define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_max_round_ps_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float>zeroinitializer, i16 -1, i32 4)
@@ -1844,7 +1844,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_ss_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -1855,7 +1855,7 @@ define <4 x float> @test_mask_add_ss_rn(
 
 define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_ss_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -1866,7 +1866,7 @@ define <4 x float> @test_mask_add_ss_rd(
 
 define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_ss_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -1877,7 +1877,7 @@ define <4 x float> @test_mask_add_ss_ru(
 
 define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_ss_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -1888,7 +1888,7 @@ define <4 x float> @test_mask_add_ss_rz(
 
 define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_ss_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddss %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -1899,7 +1899,7 @@ define <4 x float> @test_mask_add_ss_cur
 
 define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_add_ss_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1909,7 +1909,7 @@ define <4 x float> @test_maskz_add_ss_rn
 
 define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_add_ss_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 0)
@@ -1918,7 +1918,7 @@ define <4 x float> @test_add_ss_rn(<4 x
 
 define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_ss_current_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vaddss (%rdi), %xmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -1934,7 +1934,7 @@ define <4 x float> @test_mask_add_ss_cur
 
 define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_add_ss_current_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -1951,7 +1951,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_sd_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -1962,7 +1962,7 @@ define <2 x double> @test_mask_add_sd_rn
 
 define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_sd_rd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -1973,7 +1973,7 @@ define <2 x double> @test_mask_add_sd_rd
 
 define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_sd_ru:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -1984,7 +1984,7 @@ define <2 x double> @test_mask_add_sd_ru
 
 define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_sd_rz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -1995,7 +1995,7 @@ define <2 x double> @test_mask_add_sd_rz
 
 define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_sd_current:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddsd %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -2006,7 +2006,7 @@ define <2 x double> @test_mask_add_sd_cu
 
 define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_add_sd_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2016,7 +2016,7 @@ define <2 x double> @test_maskz_add_sd_r
 
 define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_add_sd_rn:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 0)
@@ -2025,7 +2025,7 @@ define <2 x double> @test_add_sd_rn(<2 x
 
 define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_add_sd_current_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vaddsd (%rdi), %xmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -2039,7 +2039,7 @@ define <2 x double> @test_mask_add_sd_cu
 
 define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_add_sd_current_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2054,7 +2054,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_ss_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -2065,7 +2065,7 @@ define <4 x float> @test_mask_max_ss_sae
 
 define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_max_ss_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2075,7 +2075,7 @@ define <4 x float> @test_maskz_max_ss_sa
 
 define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_max_ss_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)
@@ -2084,7 +2084,7 @@ define <4 x float> @test_max_ss_sae(<4 x
 
 define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxss %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm0
@@ -2095,7 +2095,7 @@ define <4 x float> @test_mask_max_ss(<4
 
 define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_max_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2105,7 +2105,7 @@ define <4 x float> @test_maskz_max_ss(<4
 
 define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_max_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 4)
@@ -2114,7 +2114,7 @@ define <4 x float> @test_max_ss(<4 x flo
 
 define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_ss_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmaxss (%rdi), %xmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -2130,7 +2130,7 @@ define <4 x float> @test_mask_max_ss_mem
 
 define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_max_ss_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2146,7 +2146,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_sd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -2157,7 +2157,7 @@ define <2 x double> @test_mask_max_sd_sa
 
 define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_max_sd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2167,7 +2167,7 @@ define <2 x double> @test_maskz_max_sd_s
 
 define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_max_sd_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 8)
@@ -2176,7 +2176,7 @@ define <2 x double> @test_max_sd_sae(<2
 
 define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm0
@@ -2187,7 +2187,7 @@ define <2 x double> @test_mask_max_sd(<2
 
 define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_max_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2197,7 +2197,7 @@ define <2 x double> @test_maskz_max_sd(<
 
 define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_max_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4)
@@ -2206,7 +2206,7 @@ define <2 x double> @test_max_sd(<2 x do
 
 define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_sd_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm0
@@ -2220,7 +2220,7 @@ define <2 x double> @test_mask_max_sd_me
 
 define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
 ; CHECK-LABEL: test_maskz_max_sd_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -2233,7 +2233,7 @@ define <2 x double> @test_maskz_max_sd_m
 
 define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
 ; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 3) ; <<<2 x double>> [#uses=1]
@@ -2243,7 +2243,7 @@ declare <2 x double> @llvm.x86.avx512.cv
 
 define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) {
 ; CHECK-LABEL: test_x86_avx512_cvtsi2ss32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 3) ; <<<4 x float>> [#uses=1]
@@ -2253,7 +2253,7 @@ declare <4 x float> @llvm.x86.avx512.cvt
 
 define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) {
 ; CHECK-LABEL: test_x86_avx512_cvtsi2ss64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 3) ; <<<4 x float>> [#uses=1]
@@ -2263,7 +2263,7 @@ declare <4 x float> @llvm.x86.avx512.cvt
 
 define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b)
 ; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2ssl %edi, {rd-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2273,7 +2273,7 @@ define <4 x float> @test_x86_avx512__mm_
 
 define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32* %ptr)
 ; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl (%rdi), %eax
 ; CHECK-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -2285,7 +2285,7 @@ define <4 x float> @test_x86_avx512__mm_
 
 define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b)
 ; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2295,7 +2295,7 @@ define <4 x float> @test_x86_avx512__mm_
 
 define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, i32* %ptr)
 ; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2ssl (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2307,7 +2307,7 @@ declare <4 x float> @llvm.x86.avx512.cvt
 
 define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b)
 ; CHECK-LABEL: _mm_cvt_roundu64_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2ssq %rdi, {rd-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2317,7 +2317,7 @@ define <4 x float> @_mm_cvt_roundu64_ss
 
 define <4 x float> @_mm_cvtu64_ss(<4 x float> %a, i64 %b)
 ; CHECK-LABEL: _mm_cvtu64_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2328,7 +2328,7 @@ declare <4 x float> @llvm.x86.avx512.cvt
 
 define <2 x double> @test_x86_avx512_mm_cvtu32_sd(<2 x double> %a, i32 %b)
 ; CHECK-LABEL: test_x86_avx512_mm_cvtu32_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2339,7 +2339,7 @@ declare <2 x double> @llvm.x86.avx512.cv
 
 define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b)
 ; CHECK-LABEL: test_x86_avx512_mm_cvtu64_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2sdq %rdi, {rd-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2349,7 +2349,7 @@ define <2 x double> @test_x86_avx512_mm_
 
 define <2 x double> @test_x86_avx512__mm_cvt_roundu64_sd(<2 x double> %a, i64 %b)
 ; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu64_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 {
@@ -2362,7 +2362,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
 ; CHECK-NEXT:    vpermi2d (%rdi), %zmm0, %zmm3 {%k1}
@@ -2380,7 +2380,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %zmm1, %zmm3
 ; CHECK-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm3
@@ -2397,7 +2397,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3
 ; CHECK-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm3
@@ -2414,7 +2414,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
 ; CHECK-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
@@ -2431,7 +2431,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; CHECK-NEXT:    vpermt2d (%rdi), %zmm0, %zmm2 {%k1} {z}
@@ -2449,7 +2449,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, double* %x2ptr, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmovapd %zmm1, %zmm2
 ; CHECK-NEXT:    vpermt2pd (%rdi){1to8}, %zmm0, %zmm2 {%k1} {z}
@@ -2469,7 +2469,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm3
 ; CHECK-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm3
@@ -2487,7 +2487,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz
 
 define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
 ; CHECK-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
@@ -2504,7 +2504,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
 ; CHECK-NEXT:    vpermt2d %zmm2, %zmm0, %zmm3
@@ -2520,7 +2520,7 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
 define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
@@ -2535,7 +2535,7 @@ define <8 x double>@test_int_x86_avx512_
 declare <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
 define <16 x float>@test_int_x86_avx512_mask_scalef_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
@@ -2551,7 +2551,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.
 
 define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovqb %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovqb %zmm0, %xmm1 {%k1}
@@ -2571,7 +2571,7 @@ declare void @llvm.x86.avx512.mask.pmov.
 
 define void @test_int_x86_avx512_mask_pmov_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovqb %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovqb %zmm0, (%rdi) {%k1}
@@ -2585,7 +2585,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsqb %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovsqb %zmm0, %xmm1 {%k1}
@@ -2605,7 +2605,7 @@ declare void @llvm.x86.avx512.mask.pmovs
 
 define void @test_int_x86_avx512_mask_pmovs_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovsqb %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovsqb %zmm0, (%rdi) {%k1}
@@ -2619,7 +2619,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovusqb %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovusqb %zmm0, %xmm1 {%k1}
@@ -2639,7 +2639,7 @@ declare void @llvm.x86.avx512.mask.pmovu
 
 define void @test_int_x86_avx512_mask_pmovus_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovusqb %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovusqb %zmm0, (%rdi) {%k1}
@@ -2653,7 +2653,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.
 
 define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovqw %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovqw %zmm0, %xmm1 {%k1}
@@ -2673,7 +2673,7 @@ declare void @llvm.x86.avx512.mask.pmov.
 
 define void @test_int_x86_avx512_mask_pmov_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovqw %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovqw %zmm0, (%rdi) {%k1}
@@ -2687,7 +2687,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.
 
 define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsqw %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovsqw %zmm0, %xmm1 {%k1}
@@ -2707,7 +2707,7 @@ declare void @llvm.x86.avx512.mask.pmovs
 
 define void @test_int_x86_avx512_mask_pmovs_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovsqw %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovsqw %zmm0, (%rdi) {%k1}
@@ -2721,7 +2721,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.
 
 define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovusqw %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovusqw %zmm0, %xmm1 {%k1}
@@ -2741,7 +2741,7 @@ declare void @llvm.x86.avx512.mask.pmovu
 
 define void @test_int_x86_avx512_mask_pmovus_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovusqw %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovusqw %zmm0, (%rdi) {%k1}
@@ -2755,7 +2755,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovqd %zmm0, %ymm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovqd %zmm0, %ymm1 {%k1}
@@ -2775,7 +2775,7 @@ declare void @llvm.x86.avx512.mask.pmov.
 
 define void @test_int_x86_avx512_mask_pmov_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovqd %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovqd %zmm0, (%rdi) {%k1}
@@ -2789,7 +2789,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsqd %zmm0, %ymm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovsqd %zmm0, %ymm1 {%k1}
@@ -2809,7 +2809,7 @@ declare void @llvm.x86.avx512.mask.pmovs
 
 define void @test_int_x86_avx512_mask_pmovs_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovsqd %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovsqd %zmm0, (%rdi) {%k1}
@@ -2823,7 +2823,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovusqd %zmm0, %ymm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovusqd %zmm0, %ymm1 {%k1}
@@ -2843,7 +2843,7 @@ declare void @llvm.x86.avx512.mask.pmovu
 
 define void @test_int_x86_avx512_mask_pmovus_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovusqd %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovusqd %zmm0, (%rdi) {%k1}
@@ -2857,7 +2857,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.
 
 define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovdb %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovdb %zmm0, %xmm1 {%k1}
@@ -2877,7 +2877,7 @@ declare void @llvm.x86.avx512.mask.pmov.
 
 define void @test_int_x86_avx512_mask_pmov_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovdb %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovdb %zmm0, (%rdi) {%k1}
@@ -2891,7 +2891,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsdb %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovsdb %zmm0, %xmm1 {%k1}
@@ -2911,7 +2911,7 @@ declare void @llvm.x86.avx512.mask.pmovs
 
 define void @test_int_x86_avx512_mask_pmovs_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovsdb %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovsdb %zmm0, (%rdi) {%k1}
@@ -2925,7 +2925,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovusdb %zmm0, %xmm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovusdb %zmm0, %xmm1 {%k1}
@@ -2945,7 +2945,7 @@ declare void @llvm.x86.avx512.mask.pmovu
 
 define void @test_int_x86_avx512_mask_pmovus_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovusdb %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovusdb %zmm0, (%rdi) {%k1}
@@ -2959,7 +2959,7 @@ declare <16 x i16> @llvm.x86.avx512.mask
 
 define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovdw %zmm0, %ymm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovdw %zmm0, %ymm1 {%k1}
@@ -2979,7 +2979,7 @@ declare void @llvm.x86.avx512.mask.pmov.
 
 define void @test_int_x86_avx512_mask_pmov_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovdw %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovdw %zmm0, (%rdi) {%k1}
@@ -2993,7 +2993,7 @@ declare <16 x i16> @llvm.x86.avx512.mask
 
 define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovsdw %zmm0, %ymm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovsdw %zmm0, %ymm1 {%k1}
@@ -3013,7 +3013,7 @@ declare void @llvm.x86.avx512.mask.pmovs
 
 define void @test_int_x86_avx512_mask_pmovs_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovsdw %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovsdw %zmm0, (%rdi) {%k1}
@@ -3027,7 +3027,7 @@ declare <16 x i16> @llvm.x86.avx512.mask
 
 define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpmovusdw %zmm0, %ymm2 {%k1} {z}
 ; CHECK-NEXT:    vpmovusdw %zmm0, %ymm1 {%k1}
@@ -3047,7 +3047,7 @@ declare void @llvm.x86.avx512.mask.pmovu
 
 define void @test_int_x86_avx512_mask_pmovus_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vpmovusdw %zmm0, (%rdi)
 ; CHECK-NEXT:    vpmovusdw %zmm0, (%rdi) {%k1}
@@ -3061,7 +3061,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtdq2ps %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvtdq2ps {rn-sae}, %zmm0, %zmm0
@@ -3077,7 +3077,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtpd2dq %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvtpd2dq {rn-sae}, %zmm0, %ymm0
@@ -3093,7 +3093,7 @@ declare <8 x float> @llvm.x86.avx512.mas
 
 define <8 x float>@test_int_x86_avx512_mask_cvt_pd2ps_512(<8 x double> %x0, <8 x float> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtpd2ps %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvtpd2ps {ru-sae}, %zmm0, %ymm0
@@ -3109,7 +3109,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvtpd2udq {rn-sae}, %zmm0, %ymm0
@@ -3125,7 +3125,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvtps2dq {rn-sae}, %zmm0, %zmm0
@@ -3141,7 +3141,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_cvt_ps2pd_512(<8 x float> %x0, <8 x double> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtps2pd %ymm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvtps2pd {sae}, %ymm0, %zmm0
@@ -3157,7 +3157,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvtps2udq {rn-sae}, %zmm0, %zmm0
@@ -3173,7 +3173,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvttpd2dq %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvttpd2dq {sae}, %zmm0, %ymm0
@@ -3189,7 +3189,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtudq2ps %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvtudq2ps {rn-sae}, %zmm0, %zmm0
@@ -3205,7 +3205,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
 
 define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvttpd2udq %zmm0, %ymm1 {%k1}
 ; CHECK-NEXT:    vcvttpd2udq {sae}, %zmm0, %ymm0
@@ -3221,7 +3221,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvttps2dq %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvttps2dq {sae}, %zmm0, %zmm0
@@ -3237,7 +3237,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvttps2udq %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vcvttps2udq {sae}, %zmm0, %zmm0
@@ -3253,7 +3253,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
 ; CHECK-LABEL: test_getexp_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
 ; CHECK-NEXT:    vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
@@ -3279,7 +3279,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_getexp_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm4
@@ -3305,7 +3305,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(
 
 define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -3318,7 +3318,7 @@ define i8@test_int_x86_avx512_mask_cmp_s
 
 define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmplesd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %ecx
 ; CHECK-NEXT:    vcmpunordsd {sae}, %xmm1, %xmm0, %k0
@@ -3349,7 +3349,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(
 
 define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcmpunordss %xmm1, %xmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -3363,7 +3363,7 @@ define i8@test_int_x86_avx512_mask_cmp_s
 
 define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpless %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %ecx
 ; CHECK-NEXT:    vcmpunordss {sae}, %xmm1, %xmm0, %k0
@@ -3393,7 +3393,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_getmant_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vgetmantpd $11, {sae}, %zmm0, %zmm0
@@ -3409,7 +3409,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_getmant_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vgetmantps $11, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vgetmantps $11, {sae}, %zmm0, %zmm0
@@ -3425,7 +3425,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_getmant_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm4
@@ -3450,7 +3450,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
@@ -3474,7 +3474,7 @@ declare <8 x double> @llvm.x86.avx512.vp
 
 define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
@@ -3483,7 +3483,7 @@ define <8 x double>@test_int_x86_avx512_
 
 define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovapd %zmm2, %zmm0
@@ -3496,7 +3496,7 @@ define <8 x double>@test_int_x86_avx512_
 
 define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_maskz(<8 x double> %x0, <8 x i64> %x1, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -3510,7 +3510,7 @@ declare <16 x float> @llvm.x86.avx512.vp
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
@@ -3519,7 +3519,7 @@ define <16 x float>@test_int_x86_avx512_
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -3532,7 +3532,7 @@ define <16 x float>@test_int_x86_avx512_
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -3545,7 +3545,7 @@ define <16 x float>@test_int_x86_avx512_
 ; Test case to make sure we can print shuffle decode comments for constant pool loads.
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
@@ -3554,7 +3554,7 @@ define <16 x float>@test_int_x86_avx512_
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
 ; CHECK-NEXT:    vmovaps %zmm2, %zmm0
@@ -3567,7 +3567,7 @@ define <16 x float>@test_int_x86_avx512_
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
 ; CHECK-NEXT:    retq
@@ -3581,7 +3581,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
@@ -3597,7 +3597,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; CHECK-NEXT:    vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -3613,7 +3613,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
 ; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
@@ -3630,7 +3630,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
 ; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
@@ -3647,7 +3647,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
 ; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
@@ -3664,7 +3664,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz
 
 define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
 ; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
@@ -3679,7 +3679,7 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_eq_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpeqsd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3689,7 +3689,7 @@ define i32 @test_x86_avx512_comi_sd_eq_s
 
 define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpeq_uqsd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3699,7 +3699,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq_
 
 define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_eq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpeqsd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3709,7 +3709,7 @@ define i32 @test_x86_avx512_comi_sd_eq(<
 
 define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpeq_uqsd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3719,7 +3719,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq(
 
 define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_lt_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpltsd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3729,7 +3729,7 @@ define i32 @test_x86_avx512_comi_sd_lt_s
 
 define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt_sae:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpngesd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3739,7 +3739,7 @@ define i32 @test_x86_avx512_ucomi_sd_lt_
 
 define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_lt:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpltsd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3749,7 +3749,7 @@ define i32 @test_x86_avx512_comi_sd_lt(<
 
 define i32 @test_x86_avx512_ucomi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpngesd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3761,7 +3761,7 @@ declare i32 @llvm.x86.avx512.vcomi.sd(<2
 
 define i32 @test_x86_avx512_ucomi_ss_lt(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_ss_lt:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vcmpngess %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
 ; CHECK-NEXT:    retq
@@ -3775,7 +3775,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm2 {%k1}
@@ -3795,7 +3795,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm3
 ; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm2 {%k1}
@@ -3815,7 +3815,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_prol_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vprold $3, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vprold $3, %zmm0, %zmm2 {%k1} {z}
@@ -3835,7 +3835,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_prol_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vprolq $3, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vprolq $3, %zmm0, %zmm2 {%k1} {z}
@@ -3855,7 +3855,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_permvar_df_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermpd %zmm0, %zmm1, %zmm3
 ; CHECK-NEXT:    vpermpd %zmm0, %zmm1, %zmm2 {%k1}
@@ -3875,7 +3875,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
 
 define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_permvar_di_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm3
 ; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm2 {%k1}
@@ -3895,7 +3895,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm3
 ; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm2 {%k1}
@@ -3915,7 +3915,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_permvar_si_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpermd %zmm0, %zmm1, %zmm3
 ; CHECK-NEXT:    vpermd %zmm0, %zmm1, %zmm2 {%k1}
@@ -3935,7 +3935,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
@@ -3957,7 +3957,7 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
@@ -3980,7 +3980,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm3
 ; CHECK-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
@@ -4003,7 +4003,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm3
 ; CHECK-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4026,7 +4026,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
@@ -4049,7 +4049,7 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %zmm0, %zmm3
 ; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3
@@ -4072,7 +4072,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm3
 ; CHECK-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4095,7 +4095,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm3
 ; CHECK-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4118,7 +4118,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm3
 ; CHECK-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4145,7 +4145,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm3
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4172,7 +4172,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm3
 ; CHECK-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4189,7 +4189,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4202,7 +4202,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
 ; CHECK-NEXT:    vfmadd231sd %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4229,7 +4229,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
 ; CHECK-NEXT:    vfmadd231ss %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4254,7 +4254,7 @@ define <4 x float>@test_int_x86_avx512_m
 
 define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
 ; CHECK-LABEL: fmadd_ss_mask_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
@@ -4281,7 +4281,7 @@ define void @fmadd_ss_mask_memfold(float
 
 define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
 ; CHECK-LABEL: fmadd_ss_maskz_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
@@ -4308,7 +4308,7 @@ define void @fmadd_ss_maskz_memfold(floa
 
 define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
 ; CHECK-LABEL: fmadd_sd_mask_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
@@ -4331,7 +4331,7 @@ define void @fmadd_sd_mask_memfold(doubl
 
 define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
 ; CHECK-LABEL: fmadd_sd_maskz_memfold:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    kmovw %edx, %k1
 ; CHECK-NEXT:    vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
@@ -4356,7 +4356,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
 ; CHECK-NEXT:    vfmsub231sd %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4383,7 +4383,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
 ; CHECK-NEXT:    vfmsub231ss %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4410,7 +4410,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
 ; CHECK-NEXT:    vfnmsub231sd %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4437,7 +4437,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
 ; CHECK-NEXT:    vfnmsub231ss %xmm1, %xmm0, %xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
@@ -4462,7 +4462,7 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vfmadd231ss (%rdi), %xmm0, %xmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
@@ -4475,7 +4475,7 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -4488,7 +4488,7 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    kmovw %eax, %k1
 ; CHECK-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 {%k1} {z}
@@ -4501,7 +4501,7 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psll_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
@@ -4509,7 +4509,7 @@ define <16 x i32> @test_x86_avx512_psll_
 }
 define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psll_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4521,7 +4521,7 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 define <16 x i32> @test_x86_avx512_maskz_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psll_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4535,7 +4535,7 @@ declare <16 x i32> @llvm.x86.avx512.psll
 
 define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psll_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
@@ -4543,7 +4543,7 @@ define <8 x i64> @test_x86_avx512_psll_q
 }
 define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psll_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4555,7 +4555,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 define <8 x i64> @test_x86_avx512_maskz_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psll_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4569,7 +4569,7 @@ declare <8 x i64> @llvm.x86.avx512.psll.
 
 define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
@@ -4577,7 +4577,7 @@ define <16 x i32> @test_x86_avx512_pslli
 }
 define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_pslli_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -4589,7 +4589,7 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 define <16 x i32> @test_x86_avx512_maskz_pslli_d_512(<16 x i32> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_pslli_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4603,7 +4603,7 @@ declare <16 x i32> @llvm.x86.avx512.psll
 
 define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
@@ -4611,7 +4611,7 @@ define <8 x i64> @test_x86_avx512_pslli_
 }
 define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_pslli_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -4623,7 +4623,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 define <8 x i64> @test_x86_avx512_maskz_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_pslli_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4637,7 +4637,7 @@ declare <8 x i64> @llvm.x86.avx512.pslli
 
 define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psra_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
@@ -4645,7 +4645,7 @@ define <8 x i64> @test_x86_avx512_psra_q
 }
 define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psra_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4657,7 +4657,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 define <8 x i64> @test_x86_avx512_maskz_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psra_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4671,7 +4671,7 @@ declare <8 x i64> @llvm.x86.avx512.psra.
 
 define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psra_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
@@ -4679,7 +4679,7 @@ define <16 x i32> @test_x86_avx512_psra_
 }
 define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psra_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4691,7 +4691,7 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 define <16 x i32> @test_x86_avx512_maskz_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psra_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4706,7 +4706,7 @@ declare <16 x i32> @llvm.x86.avx512.psra
 
 define <8 x i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrai_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
@@ -4714,7 +4714,7 @@ define <8 x i64> @test_x86_avx512_psrai_
 }
 define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrai_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -4726,7 +4726,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 define <8 x i64> @test_x86_avx512_maskz_psrai_q_512(<8 x i64> %a0, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrai_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4740,7 +4740,7 @@ declare <8 x i64> @llvm.x86.avx512.psrai
 
 define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrai_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
@@ -4748,7 +4748,7 @@ define <16 x i32> @test_x86_avx512_psrai
 }
 define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrai_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -4760,7 +4760,7 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 define <16 x i32> @test_x86_avx512_maskz_psrai_d_512(<16 x i32> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrai_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4775,7 +4775,7 @@ declare <16 x i32> @llvm.x86.avx512.psra
 
 define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrl_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
@@ -4783,7 +4783,7 @@ define <16 x i32> @test_x86_avx512_psrl_
 }
 define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrl_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4795,7 +4795,7 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 define <16 x i32> @test_x86_avx512_maskz_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrl_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4809,7 +4809,7 @@ declare <16 x i32> @llvm.x86.avx512.psrl
 
 define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrl_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
@@ -4817,7 +4817,7 @@ define <8 x i64> @test_x86_avx512_psrl_q
 }
 define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrl_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4829,7 +4829,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 define <8 x i64> @test_x86_avx512_maskz_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrl_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4843,7 +4843,7 @@ declare <8 x i64> @llvm.x86.avx512.psrl.
 
 define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrli_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
@@ -4851,7 +4851,7 @@ define <16 x i32> @test_x86_avx512_psrli
 }
 define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrli_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -4863,7 +4863,7 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 define <16 x i32> @test_x86_avx512_maskz_psrli_d_512(<16 x i32> %a0, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrli_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4877,7 +4877,7 @@ declare <16 x i32> @llvm.x86.avx512.psrl
 
 define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrli_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
@@ -4885,7 +4885,7 @@ define <8 x i64> @test_x86_avx512_psrli_
 }
 define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrli_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -4897,7 +4897,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 define <8 x i64> @test_x86_avx512_maskz_psrli_q_512(<8 x i64> %a0, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrli_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4910,7 +4910,7 @@ declare <8 x i64> @llvm.x86.avx512.psrli
 
 define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psllv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
@@ -4919,7 +4919,7 @@ define <16 x i32> @test_x86_avx512_psllv
 
 define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psllv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4932,7 +4932,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psllv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4946,7 +4946,7 @@ declare <16 x i32> @llvm.x86.avx512.psll
 
 define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psllv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -4955,7 +4955,7 @@ define <8 x i64> @test_x86_avx512_psllv_
 
 define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psllv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -4968,7 +4968,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psllv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -4982,7 +4982,7 @@ declare <8 x i64> @llvm.x86.avx512.psllv
 
 define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrav_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
@@ -4991,7 +4991,7 @@ define <16 x i32> @test_x86_avx512_psrav
 
 define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrav_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -5004,7 +5004,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrav_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -5018,7 +5018,7 @@ declare <16 x i32> @llvm.x86.avx512.psra
 
 define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrav_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -5027,7 +5027,7 @@ define <8 x i64> @test_x86_avx512_psrav_
 
 define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrav_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -5040,7 +5040,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrav_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -5054,7 +5054,7 @@ declare <8 x i64> @llvm.x86.avx512.psrav
 
 define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
@@ -5063,7 +5063,7 @@ define <16 x i32> @test_x86_avx512_psrlv
 
 define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrlv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -5076,7 +5076,7 @@ define <16 x i32> @test_x86_avx512_mask_
 
 define <16 x i32> @test_x86_avx512_maskz_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -5090,7 +5090,7 @@ declare <16 x i32> @llvm.x86.avx512.psrl
 
 define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -5099,7 +5099,7 @@ define <8 x i64> @test_x86_avx512_psrlv_
 
 define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_mask_psrlv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
 ; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -5112,7 +5112,7 @@ define <8 x i64> @test_x86_avx512_mask_p
 
 define <8 x i64> @test_x86_avx512_maskz_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
 ; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q_512:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-load-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-load-store.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
 ; CHECK64-LABEL: test_mm_mask_move_ss:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovss %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_mask_move_ss:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vmovss %xmm2, %xmm0, %xmm0 {%k1}
@@ -28,13 +28,13 @@ entry:
 
 define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
 ; CHECK64-LABEL: test_mm_maskz_move_ss:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_maskz_move_ss:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
@@ -52,13 +52,13 @@ entry:
 
 define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
 ; CHECK64-LABEL: test_mm_mask_move_sd:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovsd %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_mask_move_sd:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vmovsd %xmm2, %xmm0, %xmm0 {%k1}
@@ -76,13 +76,13 @@ entry:
 
 define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
 ; CHECK64-LABEL: test_mm_maskz_move_sd:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_maskz_move_sd:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
@@ -100,13 +100,13 @@ entry:
 
 define void @test_mm_mask_store_ss(float* %__W, i8 zeroext %__U, <4 x float> %__A) local_unnamed_addr #1 {
 ; CHECK64-LABEL: test_mm_mask_store_ss:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %esi, %k1
 ; CHECK64-NEXT:    vmovss %xmm0, (%rdi) {%k1}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_mask_store_ss:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; CHECK32-NEXT:    kmovw %ecx, %k1
@@ -124,13 +124,13 @@ entry:
 
 define void @test_mm_mask_store_sd(double* %__W, i8 zeroext %__U, <2 x double> %__A) local_unnamed_addr #1 {
 ; CHECK64-LABEL: test_mm_mask_store_sd:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %esi, %k1
 ; CHECK64-NEXT:    vmovsd %xmm0, (%rdi) {%k1}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_mask_store_sd:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; CHECK32-NEXT:    kmovw %ecx, %k1
@@ -147,13 +147,13 @@ entry:
 
 define <4 x float> @test_mm_mask_load_ss(<4 x float> %__A, i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
 ; CHECK64-LABEL: test_mm_mask_load_ss:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovss (%rsi), %xmm0 {%k1}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_mask_load_ss:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; CHECK32-NEXT:    kmovw %ecx, %k1
@@ -173,13 +173,13 @@ entry:
 
 define <2 x double> @test_mm_mask_load_sd(<2 x double> %__A, i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
 ; CHECK64-LABEL: test_mm_mask_load_sd:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovsd (%rsi), %xmm0 {%k1}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_mask_load_sd:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; CHECK32-NEXT:    kmovw %ecx, %k1
@@ -198,13 +198,13 @@ entry:
 
 define <4 x float> @test_mm_maskz_load_ss(i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
 ; CHECK64-LABEL: test_mm_maskz_load_ss:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovss (%rsi), %xmm0 {%k1} {z}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_maskz_load_ss:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; CHECK32-NEXT:    kmovw %ecx, %k1
@@ -222,13 +222,13 @@ entry:
 
 define <2 x double> @test_mm_maskz_load_sd(i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
 ; CHECK64-LABEL: test_mm_maskz_load_sd:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    kmovw %edi, %k1
 ; CHECK64-NEXT:    vmovsd (%rsi), %xmm0 {%k1} {z}
 ; CHECK64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_mm_maskz_load_sd:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; CHECK32-NEXT:    kmovw %ecx, %k1

Modified: llvm/trunk/test/CodeGen/X86/avx512-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-logic.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-logic.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpandd:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; ALL-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -19,7 +19,7 @@ entry:
 
 define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpandnd:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; ALL-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
@@ -35,7 +35,7 @@ entry:
 
 define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpord:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; ALL-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -49,7 +49,7 @@ entry:
 
 define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpxord:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
 ; ALL-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -63,7 +63,7 @@ entry:
 
 define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpandq:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; ALL-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -76,7 +76,7 @@ entry:
 
 define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpandnq:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; ALL-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
@@ -90,7 +90,7 @@ entry:
 
 define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vporq:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; ALL-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -103,7 +103,7 @@ entry:
 
 define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpxorq:
-; ALL:       ## BB#0: ## %entry
+; ALL:       ## %bb.0: ## %entry
 ; ALL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; ALL-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -117,12 +117,12 @@ entry:
 
 define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
 ; KNL-LABEL: orq_broadcast:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: orq_broadcast:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -131,12 +131,12 @@ define <8 x i64> @orq_broadcast(<8 x i64
 
 define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
 ; KNL-LABEL: andd512fold:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    vpandq (%rdi), %zmm0, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: andd512fold:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    vandps (%rdi), %zmm0, %zmm0
 ; SKX-NEXT:    retq
 entry:
@@ -147,12 +147,12 @@ entry:
 
 define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
 ; KNL-LABEL: andqbrst:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    vpandq (%rdi){1to8}, %zmm0, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: andqbrst:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    vandpd (%rdi){1to8}, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 entry:
@@ -165,13 +165,13 @@ entry:
 
 define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: and_v64i8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; KNL-NEXT:    vandps %ymm3, %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: and_v64i8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %res = and <64 x i8> %a, %b
@@ -180,13 +180,13 @@ define <64 x i8> @and_v64i8(<64 x i8> %a
 
 define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: andn_v64i8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vandnps %ymm0, %ymm2, %ymm0
 ; KNL-NEXT:    vandnps %ymm1, %ymm3, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: andn_v64i8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vandnps %zmm0, %zmm1, %zmm0
 ; SKX-NEXT:    retq
   %b2 = xor <64 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -199,13 +199,13 @@ define <64 x i8> @andn_v64i8(<64 x i8> %
 
 define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: or_v64i8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; KNL-NEXT:    vorps %ymm3, %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: or_v64i8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vorps %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %res = or <64 x i8> %a, %b
@@ -214,13 +214,13 @@ define <64 x i8> @or_v64i8(<64 x i8> %a,
 
 define <64 x i8> @xor_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: xor_v64i8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; KNL-NEXT:    vxorps %ymm3, %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: xor_v64i8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vxorps %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %res = xor <64 x i8> %a, %b
@@ -229,13 +229,13 @@ define <64 x i8> @xor_v64i8(<64 x i8> %a
 
 define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; KNL-LABEL: and_v32i16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; KNL-NEXT:    vandps %ymm3, %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: and_v32i16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %res = and <32 x i16> %a, %b
@@ -244,13 +244,13 @@ define <32 x i16> @and_v32i16(<32 x i16>
 
 define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; KNL-LABEL: andn_v32i16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vandnps %ymm0, %ymm2, %ymm0
 ; KNL-NEXT:    vandnps %ymm1, %ymm3, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: andn_v32i16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vandnps %zmm0, %zmm1, %zmm0
 ; SKX-NEXT:    retq
   %b2 = xor <32 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
@@ -261,13 +261,13 @@ define <32 x i16> @andn_v32i16(<32 x i16
 
 define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; KNL-LABEL: or_v32i16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; KNL-NEXT:    vorps %ymm3, %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: or_v32i16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vorps %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %res = or <32 x i16> %a, %b
@@ -276,13 +276,13 @@ define <32 x i16> @or_v32i16(<32 x i16>
 
 define <32 x i16> @xor_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; KNL-LABEL: xor_v32i16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; KNL-NEXT:    vxorps %ymm3, %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: xor_v32i16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vxorps %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %res = xor <32 x i16> %a, %b
@@ -291,14 +291,14 @@ define <32 x i16> @xor_v32i16(<32 x i16>
 
 define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
 ; KNL-LABEL: masked_and_v16f32:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandd %zmm1, %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vaddps %zmm2, %zmm3, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: masked_and_v16f32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1}
 ; SKX-NEXT:    vaddps %zmm2, %zmm3, %zmm0
@@ -316,14 +316,14 @@ define <16 x float> @masked_and_v16f32(<
 
 define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
 ; KNL-LABEL: masked_or_v16f32:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandd %zmm1, %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vaddps %zmm2, %zmm3, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: masked_or_v16f32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1}
 ; SKX-NEXT:    vaddps %zmm2, %zmm3, %zmm0
@@ -341,14 +341,14 @@ define <16 x float> @masked_or_v16f32(<1
 
 define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
 ; KNL-LABEL: masked_xor_v16f32:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandd %zmm1, %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vaddps %zmm2, %zmm3, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: masked_xor_v16f32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1}
 ; SKX-NEXT:    vaddps %zmm2, %zmm3, %zmm0
@@ -366,14 +366,14 @@ define <16 x float> @masked_xor_v16f32(<
 
 define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
 ; KNL-LABEL: masked_and_v8f64:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandq %zmm1, %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vaddpd %zmm2, %zmm3, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: masked_and_v8f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1}
 ; SKX-NEXT:    vaddpd %zmm2, %zmm3, %zmm0
@@ -391,14 +391,14 @@ define <8 x double> @masked_and_v8f64(<8
 
 define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
 ; KNL-LABEL: masked_or_v8f64:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandq %zmm1, %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vaddpd %zmm2, %zmm3, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: masked_or_v8f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1}
 ; SKX-NEXT:    vaddpd %zmm2, %zmm3, %zmm0
@@ -416,14 +416,14 @@ define <8 x double> @masked_or_v8f64(<8
 
 define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
 ; KNL-LABEL: masked_xor_v8f64:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandq %zmm1, %zmm0, %zmm2 {%k1}
 ; KNL-NEXT:    vaddpd %zmm2, %zmm3, %zmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: masked_xor_v8f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1}
 ; SKX-NEXT:    vaddpd %zmm2, %zmm3, %zmm0
@@ -441,13 +441,13 @@ define <8 x double> @masked_xor_v8f64(<8
 
 define <8 x i64> @test_mm512_mask_and_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
 ; KNL-LABEL: test_mm512_mask_and_epi32:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandd %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_and_epi32:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandps %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -463,13 +463,13 @@ entry:
 
 define <8 x i64> @test_mm512_mask_or_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
 ; KNL-LABEL: test_mm512_mask_or_epi32:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpord %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_or_epi32:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vorps %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -485,13 +485,13 @@ entry:
 
 define <8 x i64> @test_mm512_mask_xor_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
 ; KNL-LABEL: test_mm512_mask_xor_epi32:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpxord %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_xor_epi32:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vxorps %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -507,13 +507,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_xor_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_mask_xor_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpxorq %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_xor_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vxorpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -529,13 +529,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_xor_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_maskz_xor_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpxorq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_xor_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -551,13 +551,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_xor_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_mask_xor_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpxord %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_xor_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vxorps %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -573,13 +573,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_xor_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_maskz_xor_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpxord %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_xor_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vxorps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -595,13 +595,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_or_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_mask_or_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vporq %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_or_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vorpd %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -617,13 +617,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_or_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_maskz_or_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vporq %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_or_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vorpd %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -639,13 +639,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_or_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_mask_or_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpord %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_or_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vorps %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -661,13 +661,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_or_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_maskz_or_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpord %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_or_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vorps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -683,13 +683,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_and_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_mask_and_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandq %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_and_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandpd %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -705,13 +705,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_and_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_maskz_and_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandq %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_and_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandpd %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -727,13 +727,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_and_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_mask_and_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandd %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_and_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandps %zmm1, %zmm2, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -749,13 +749,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_and_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_maskz_and_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandd %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_and_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -771,13 +771,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_andnot_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_mask_andnot_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandnq %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_andnot_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandnpd %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -794,13 +794,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_andnot_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; KNL-LABEL: test_mm512_maskz_andnot_pd:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandnq %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_andnot_pd:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -817,13 +817,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_andnot_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_mask_andnot_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandnd %zmm2, %zmm1, %zmm0 {%k1}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_mask_andnot_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandnps %zmm2, %zmm1, %zmm0 {%k1}
 ; SKX-NEXT:    retq
@@ -840,13 +840,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_andnot_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; KNL-LABEL: test_mm512_maskz_andnot_ps:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vpandnd %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_mm512_maskz_andnot_ps:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vandnps %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i16 @mask16(i16 %x) {
 ; KNL-LABEL: mask16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    knotw %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
@@ -15,7 +15,7 @@ define i16 @mask16(i16 %x) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mask16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    knotw %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
@@ -23,7 +23,7 @@ define i16 @mask16(i16 %x) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: mask16:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -31,7 +31,7 @@ define i16 @mask16(i16 %x) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: mask16:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    knotw %k0, %k0
 ; AVX512DQ-NEXT:    kmovw %k0, %eax
@@ -45,28 +45,28 @@ define i16 @mask16(i16 %x) {
 
 define i32 @mask16_zext(i16 %x) {
 ; KNL-LABEL: mask16_zext:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    knotw %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mask16_zext:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    knotw %k0, %k0
 ; SKX-NEXT:    kmovw %k0, %eax
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: mask16_zext:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
 ; AVX512BW-NEXT:    kmovw %k0, %eax
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: mask16_zext:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    knotw %k0, %k0
 ; AVX512DQ-NEXT:    kmovw %k0, %eax
@@ -80,7 +80,7 @@ define i32 @mask16_zext(i16 %x) {
 
 define i8 @mask8(i8 %x) {
 ; KNL-LABEL: mask8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    knotw %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
@@ -88,7 +88,7 @@ define i8 @mask8(i8 %x) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mask8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    knotb %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
@@ -96,7 +96,7 @@ define i8 @mask8(i8 %x) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: mask8:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -104,7 +104,7 @@ define i8 @mask8(i8 %x) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: mask8:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    knotb %k0, %k0
 ; AVX512DQ-NEXT:    kmovw %k0, %eax
@@ -118,7 +118,7 @@ define i8 @mask8(i8 %x) {
 
 define i32 @mask8_zext(i8 %x) {
 ; KNL-LABEL: mask8_zext:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    knotw %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
@@ -126,14 +126,14 @@ define i32 @mask8_zext(i8 %x) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mask8_zext:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    knotb %k0, %k0
 ; SKX-NEXT:    kmovb %k0, %eax
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: mask8_zext:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -141,7 +141,7 @@ define i32 @mask8_zext(i8 %x) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: mask8_zext:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    knotb %k0, %k0
 ; AVX512DQ-NEXT:    kmovb %k0, %eax
@@ -155,7 +155,7 @@ define i32 @mask8_zext(i8 %x) {
 
 define void @mask16_mem(i16* %ptr) {
 ; CHECK-LABEL: mask16_mem:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw (%rdi), %k0
 ; CHECK-NEXT:    knotw %k0, %k0
 ; CHECK-NEXT:    kmovw %k0, (%rdi)
@@ -170,7 +170,7 @@ define void @mask16_mem(i16* %ptr) {
 
 define void @mask8_mem(i8* %ptr) {
 ; KNL-LABEL: mask8_mem:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movzbl (%rdi), %eax
 ; KNL-NEXT:    kmovw %eax, %k0
 ; KNL-NEXT:    knotw %k0, %k0
@@ -179,14 +179,14 @@ define void @mask8_mem(i8* %ptr) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mask8_mem:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0
 ; SKX-NEXT:    knotb %k0, %k0
 ; SKX-NEXT:    kmovb %k0, (%rdi)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: mask8_mem:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    movzbl (%rdi), %eax
 ; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
@@ -195,7 +195,7 @@ define void @mask8_mem(i8* %ptr) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: mask8_mem:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovb (%rdi), %k0
 ; AVX512DQ-NEXT:    knotb %k0, %k0
 ; AVX512DQ-NEXT:    kmovb %k0, (%rdi)
@@ -210,7 +210,7 @@ define void @mask8_mem(i8* %ptr) {
 
 define i16 @mand16(i16 %x, i16 %y) {
 ; CHECK-LABEL: mand16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    xorl %esi, %eax
 ; CHECK-NEXT:    andl %esi, %edi
@@ -228,7 +228,7 @@ define i16 @mand16(i16 %x, i16 %y) {
 
 define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
 ; KNL-LABEL: mand16_mem:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw (%rdi), %k0
 ; KNL-NEXT:    kmovw (%rsi), %k1
 ; KNL-NEXT:    kandw %k1, %k0, %k2
@@ -239,7 +239,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mand16_mem:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovw (%rdi), %k0
 ; SKX-NEXT:    kmovw (%rsi), %k1
 ; SKX-NEXT:    kandw %k1, %k0, %k2
@@ -250,7 +250,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: mand16_mem:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovw (%rdi), %k0
 ; AVX512BW-NEXT:    kmovw (%rsi), %k1
 ; AVX512BW-NEXT:    kandw %k1, %k0, %k2
@@ -261,7 +261,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: mand16_mem:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw (%rdi), %k0
 ; AVX512DQ-NEXT:    kmovw (%rsi), %k1
 ; AVX512DQ-NEXT:    kandw %k1, %k0, %k2
@@ -281,7 +281,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
 
 define i8 @shuf_test1(i16 %v) nounwind {
 ; KNL-LABEL: shuf_test1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kshiftrw $8, %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
@@ -289,7 +289,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: shuf_test1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kshiftrw $8, %k0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax
@@ -297,7 +297,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuf_test1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kshiftrw $8, %k0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -305,7 +305,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: shuf_test1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kshiftrw $8, %k0, %k0
 ; AVX512DQ-NEXT:    kmovw %k0, %eax
@@ -319,7 +319,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
 
 define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
 ; KNL-LABEL: zext_test1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kshiftlw $10, %k0, %k0
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
@@ -329,7 +329,7 @@ define i32 @zext_test1(<16 x i32> %a, <1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: zext_test1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kshiftlw $10, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
@@ -339,7 +339,7 @@ define i32 @zext_test1(<16 x i32> %a, <1
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: zext_test1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT:    kshiftlw $10, %k0, %k0
 ; AVX512BW-NEXT:    kshiftrw $15, %k0, %k0
@@ -349,7 +349,7 @@ define i32 @zext_test1(<16 x i32> %a, <1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: zext_test1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; AVX512DQ-NEXT:    kshiftlw $10, %k0, %k0
 ; AVX512DQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -365,7 +365,7 @@ define i32 @zext_test1(<16 x i32> %a, <1
 
 define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
 ; KNL-LABEL: zext_test2:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kshiftlw $10, %k0, %k0
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
@@ -376,7 +376,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: zext_test2:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kshiftlw $10, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
@@ -387,7 +387,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: zext_test2:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT:    kshiftlw $10, %k0, %k0
 ; AVX512BW-NEXT:    kshiftrw $15, %k0, %k0
@@ -398,7 +398,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: zext_test2:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; AVX512DQ-NEXT:    kshiftlw $10, %k0, %k0
 ; AVX512DQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -415,7 +415,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
 
 define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
 ; KNL-LABEL: zext_test3:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kshiftlw $10, %k0, %k0
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
@@ -426,7 +426,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: zext_test3:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kshiftlw $10, %k0, %k0
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0
@@ -437,7 +437,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: zext_test3:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT:    kshiftlw $10, %k0, %k0
 ; AVX512BW-NEXT:    kshiftrw $15, %k0, %k0
@@ -448,7 +448,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: zext_test3:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; AVX512DQ-NEXT:    kshiftlw $10, %k0, %k0
 ; AVX512DQ-NEXT:    kshiftrw $15, %k0, %k0
@@ -465,7 +465,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
 
 define i8 @conv1(<8 x i1>* %R) {
 ; KNL-LABEL: conv1:
-; KNL:       ## BB#0: ## %entry
+; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    kxnorw %k0, %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    movb %al, (%rdi)
@@ -474,7 +474,7 @@ define i8 @conv1(<8 x i1>* %R) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: conv1:
-; SKX:       ## BB#0: ## %entry
+; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    kxnorw %k0, %k0, %k0
 ; SKX-NEXT:    kmovb %k0, (%rdi)
 ; SKX-NEXT:    movb $-2, -{{[0-9]+}}(%rsp)
@@ -482,7 +482,7 @@ define i8 @conv1(<8 x i1>* %R) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: conv1:
-; AVX512BW:       ## BB#0: ## %entry
+; AVX512BW:       ## %bb.0: ## %entry
 ; AVX512BW-NEXT:    kxnorw %k0, %k0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
 ; AVX512BW-NEXT:    movb %al, (%rdi)
@@ -491,7 +491,7 @@ define i8 @conv1(<8 x i1>* %R) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: conv1:
-; AVX512DQ:       ## BB#0: ## %entry
+; AVX512DQ:       ## %bb.0: ## %entry
 ; AVX512DQ-NEXT:    kxnorw %k0, %k0, %k0
 ; AVX512DQ-NEXT:    kmovb %k0, (%rdi)
 ; AVX512DQ-NEXT:    movb $-2, -{{[0-9]+}}(%rsp)
@@ -509,7 +509,7 @@ entry:
 
 define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
 ; KNL-LABEL: test4:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
 ; KNL-NEXT:    vpmovqd %zmm0, %ymm0
 ; KNL-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm1
@@ -519,7 +519,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test4:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpgtq %ymm1, %ymm0, %k0
 ; SKX-NEXT:    vpcmpgtq %ymm3, %ymm2, %k1
 ; SKX-NEXT:    kandnw %k0, %k1, %k0
@@ -528,7 +528,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test4:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512BW-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm1
@@ -538,7 +538,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test4:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
 ; AVX512DQ-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm1
@@ -555,14 +555,14 @@ define <4 x i32> @test4(<4 x i64> %x, <4
 
 define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
 ; KNL-LABEL: test5:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; KNL-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
 ; KNL-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test5:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpcmpgtq %xmm0, %xmm1, %k0
 ; SKX-NEXT:    vpcmpgtq %xmm3, %xmm2, %k1
 ; SKX-NEXT:    kandnw %k1, %k0, %k0
@@ -570,14 +570,14 @@ define <2 x i64> @test5(<2 x i64> %x, <2
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test5:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; AVX512BW-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
 ; AVX512BW-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test5:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
 ; AVX512DQ-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
 ; AVX512DQ-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -602,7 +602,7 @@ false:
 }
 define void @test7(<8 x i1> %mask)  {
 ; KNL-LABEL: test7:
-; KNL:       ## BB#0: ## %allocas
+; KNL:       ## %bb.0: ## %allocas
 ; KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -615,7 +615,7 @@ define void @test7(<8 x i1> %mask)  {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test7:
-; SKX:       ## BB#0: ## %allocas
+; SKX:       ## %bb.0: ## %allocas
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0
 ; SKX-NEXT:    movb $85, %al
@@ -625,7 +625,7 @@ define void @test7(<8 x i1> %mask)  {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test7:
-; AVX512BW:       ## BB#0: ## %allocas
+; AVX512BW:       ## %bb.0: ## %allocas
 ; AVX512BW-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpmovw2m %zmm0, %k0
 ; AVX512BW-NEXT:    movb $85, %al
@@ -637,7 +637,7 @@ define void @test7(<8 x i1> %mask)  {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test7:
-; AVX512DQ:       ## BB#0: ## %allocas
+; AVX512DQ:       ## %bb.0: ## %allocas
 ; AVX512DQ-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512DQ-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -661,11 +661,11 @@ false:
 }
 define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
 ; KNL-LABEL: test8:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; KNL-NEXT:    jg LBB17_1
-; KNL-NEXT:  ## BB#2:
+; KNL-NEXT:  ## %bb.2:
 ; KNL-NEXT:    vpcmpltud %zmm2, %zmm1, %k1
 ; KNL-NEXT:    jmp LBB17_3
 ; KNL-NEXT:  LBB17_1:
@@ -677,11 +677,11 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    jg LBB17_1
-; SKX-NEXT:  ## BB#2:
+; SKX-NEXT:  ## %bb.2:
 ; SKX-NEXT:    vpcmpltud %zmm2, %zmm1, %k0
 ; SKX-NEXT:    vpmovm2b %k0, %xmm0
 ; SKX-NEXT:    vzeroupper
@@ -693,11 +693,11 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test8:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    cmpl %esi, %edi
 ; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-NEXT:    jg LBB17_1
-; AVX512BW-NEXT:  ## BB#2:
+; AVX512BW-NEXT:  ## %bb.2:
 ; AVX512BW-NEXT:    vpcmpltud %zmm2, %zmm1, %k0
 ; AVX512BW-NEXT:    jmp LBB17_3
 ; AVX512BW-NEXT:  LBB17_1:
@@ -709,11 +709,11 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test8:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    cmpl %esi, %edi
 ; AVX512DQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512DQ-NEXT:    jg LBB17_1
-; AVX512DQ-NEXT:  ## BB#2:
+; AVX512DQ-NEXT:  ## %bb.2:
 ; AVX512DQ-NEXT:    vpcmpltud %zmm2, %zmm1, %k0
 ; AVX512DQ-NEXT:    jmp LBB17_3
 ; AVX512DQ-NEXT:  LBB17_1:
@@ -732,10 +732,10 @@ define <16 x i8> @test8(<16 x i32>%a, <1
 }
 define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
 ; KNL-LABEL: test9:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    jg LBB18_1
-; KNL-NEXT:  ## BB#2:
+; KNL-NEXT:  ## %bb.2:
 ; KNL-NEXT:    vpmovsxbd %xmm1, %zmm0
 ; KNL-NEXT:    jmp LBB18_3
 ; KNL-NEXT:  LBB18_1:
@@ -749,10 +749,10 @@ define <16 x i1> @test9(<16 x i1>%a, <16
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test9:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    jg LBB18_1
-; SKX-NEXT:  ## BB#2:
+; SKX-NEXT:  ## %bb.2:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm0
 ; SKX-NEXT:    jmp LBB18_3
 ; SKX-NEXT:  LBB18_1:
@@ -763,10 +763,10 @@ define <16 x i1> @test9(<16 x i1>%a, <16
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test9:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    cmpl %esi, %edi
 ; AVX512BW-NEXT:    jg LBB18_1
-; AVX512BW-NEXT:  ## BB#2:
+; AVX512BW-NEXT:  ## %bb.2:
 ; AVX512BW-NEXT:    vpsllw $7, %xmm1, %xmm0
 ; AVX512BW-NEXT:    jmp LBB18_3
 ; AVX512BW-NEXT:  LBB18_1:
@@ -779,10 +779,10 @@ define <16 x i1> @test9(<16 x i1>%a, <16
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test9:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    cmpl %esi, %edi
 ; AVX512DQ-NEXT:    jg LBB18_1
-; AVX512DQ-NEXT:  ## BB#2:
+; AVX512DQ-NEXT:  ## %bb.2:
 ; AVX512DQ-NEXT:    vpmovsxbd %xmm1, %zmm0
 ; AVX512DQ-NEXT:    jmp LBB18_3
 ; AVX512DQ-NEXT:  LBB18_1:
@@ -805,19 +805,19 @@ define <16 x i1> @test9(<16 x i1>%a, <16
 
 define <4 x i1> @test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
 ; KNL-LABEL: test11:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    jg LBB20_2
-; KNL-NEXT:  ## BB#1:
+; KNL-NEXT:  ## %bb.1:
 ; KNL-NEXT:    vmovaps %xmm1, %xmm0
 ; KNL-NEXT:  LBB20_2:
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test11:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    jg LBB20_1
-; SKX-NEXT:  ## BB#2:
+; SKX-NEXT:  ## %bb.2:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm0
 ; SKX-NEXT:    jmp LBB20_3
 ; SKX-NEXT:  LBB20_1:
@@ -828,19 +828,19 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test11:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    cmpl %esi, %edi
 ; AVX512BW-NEXT:    jg LBB20_2
-; AVX512BW-NEXT:  ## BB#1:
+; AVX512BW-NEXT:  ## %bb.1:
 ; AVX512BW-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX512BW-NEXT:  LBB20_2:
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test11:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    cmpl %esi, %edi
 ; AVX512DQ-NEXT:    jg LBB20_2
-; AVX512DQ-NEXT:  ## BB#1:
+; AVX512DQ-NEXT:  ## %bb.1:
 ; AVX512DQ-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX512DQ-NEXT:  LBB20_2:
 ; AVX512DQ-NEXT:    retq
@@ -851,7 +851,7 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x
 
 define i32 @test12(i32 %x, i32 %y)  {
 ; CHECK-LABEL: test12:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %a = bitcast i16 21845 to <16 x i1>
@@ -862,7 +862,7 @@ define i32 @test12(i32 %x, i32 %y)  {
 
 define i32 @test13(i32 %x, i32 %y)  {
 ; CHECK-LABEL: test13:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl %esi, %eax
 ; CHECK-NEXT:    retq
   %a = bitcast i16 21845 to <16 x i1>
@@ -878,7 +878,7 @@ define i32 @test13(i32 %x, i32 %y)  {
 
 define <16 x i1> @test15(i32 %x, i32 %y)  {
 ; KNL-LABEL: test15:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    cmpl %esi, %edi
 ; KNL-NEXT:    movw $21845, %ax ## imm = 0x5555
 ; KNL-NEXT:    movw $1, %cx
@@ -890,7 +890,7 @@ define <16 x i1> @test15(i32 %x, i32 %y)
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test15:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi
 ; SKX-NEXT:    movw $21845, %ax ## imm = 0x5555
 ; SKX-NEXT:    movw $1, %cx
@@ -900,7 +900,7 @@ define <16 x i1> @test15(i32 %x, i32 %y)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test15:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    cmpl %esi, %edi
 ; AVX512BW-NEXT:    movw $21845, %ax ## imm = 0x5555
 ; AVX512BW-NEXT:    movw $1, %cx
@@ -912,7 +912,7 @@ define <16 x i1> @test15(i32 %x, i32 %y)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test15:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    cmpl %esi, %edi
 ; AVX512DQ-NEXT:    movw $21845, %ax ## imm = 0x5555
 ; AVX512DQ-NEXT:    movw $1, %cx
@@ -932,7 +932,7 @@ define <16 x i1> @test15(i32 %x, i32 %y)
 define <64 x i8> @test16(i64 %x) {
 ;
 ; KNL-LABEL: test16:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -968,7 +968,7 @@ define <64 x i8> @test16(i64 %x) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovq %rdi, %k0
 ; SKX-NEXT:    movb $1, %al
 ; SKX-NEXT:    kmovd %eax, %k1
@@ -984,7 +984,7 @@ define <64 x i8> @test16(i64 %x) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test16:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovq %rdi, %k0
 ; AVX512BW-NEXT:    movb $1, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
@@ -999,7 +999,7 @@ define <64 x i8> @test16(i64 %x) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test16:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    pushq %rbp
 ; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512DQ-NEXT:    .cfi_offset %rbp, -16
@@ -1042,7 +1042,7 @@ define <64 x i8> @test16(i64 %x) {
 define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
 ;
 ; KNL-LABEL: test17:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -1080,7 +1080,7 @@ define <64 x i8> @test17(i64 %x, i32 %y,
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test17:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovq %rdi, %k0
 ; SKX-NEXT:    cmpl %edx, %esi
 ; SKX-NEXT:    setg %al
@@ -1097,7 +1097,7 @@ define <64 x i8> @test17(i64 %x, i32 %y,
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test17:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovq %rdi, %k0
 ; AVX512BW-NEXT:    cmpl %edx, %esi
 ; AVX512BW-NEXT:    setg %al
@@ -1113,7 +1113,7 @@ define <64 x i8> @test17(i64 %x, i32 %y,
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test17:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    pushq %rbp
 ; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512DQ-NEXT:    .cfi_offset %rbp, -16
@@ -1158,7 +1158,7 @@ define <64 x i8> @test17(i64 %x, i32 %y,
 
 define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-LABEL: test18:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    kmovw %esi, %k2
 ; KNL-NEXT:    kshiftlw $7, %k2, %k0
@@ -1181,7 +1181,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test18:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    kmovd %esi, %k2
 ; SKX-NEXT:    kshiftlw $7, %k2, %k0
@@ -1202,7 +1202,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test18:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k1
 ; AVX512BW-NEXT:    kmovd %esi, %k2
 ; AVX512BW-NEXT:    kshiftlw $7, %k2, %k0
@@ -1225,7 +1225,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test18:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k1
 ; AVX512DQ-NEXT:    kmovw %esi, %k2
 ; AVX512DQ-NEXT:    kshiftlw $7, %k2, %k0
@@ -1255,7 +1255,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 }
 define <32 x i16> @test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
 ; KNL-LABEL: test21:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
 ; KNL-NEXT:    vpsllw $15, %ymm3, %ymm3
 ; KNL-NEXT:    vpsraw $15, %ymm3, %ymm3
@@ -1268,21 +1268,21 @@ define <32 x i16> @test21(<32 x i16> %x
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test21:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm1, %ymm1
 ; SKX-NEXT:    vpmovb2m %ymm1, %k1
 ; SKX-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test21:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $7, %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
 ; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test21:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
 ; AVX512DQ-NEXT:    vpsllw $15, %ymm3, %ymm3
 ; AVX512DQ-NEXT:    vpsraw $15, %ymm3, %ymm3
@@ -1299,7 +1299,7 @@ define <32 x i16> @test21(<32 x i16> %x
 
 define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
 ; KNL-LABEL: test22:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL-NEXT:    vpslld $31, %ymm0, %ymm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1309,14 +1309,14 @@ define void @test22(<4 x i1> %a, <4 x i1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test22:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k0
 ; SKX-NEXT:    kmovb %k0, (%rdi)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test22:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; AVX512BW-NEXT:    vpslld $31, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1326,7 +1326,7 @@ define void @test22(<4 x i1> %a, <4 x i1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test22:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; AVX512DQ-NEXT:    vpslld $31, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1339,7 +1339,7 @@ define void @test22(<4 x i1> %a, <4 x i1
 
 define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
 ; KNL-LABEL: test23:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -1349,14 +1349,14 @@ define void @test23(<2 x i1> %a, <2 x i1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test23:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k0
 ; SKX-NEXT:    kmovb %k0, (%rdi)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test23:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; AVX512BW-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -1366,7 +1366,7 @@ define void @test23(<2 x i1> %a, <2 x i1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test23:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; AVX512DQ-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -1379,7 +1379,7 @@ define void @test23(<2 x i1> %a, <2 x i1
 
 define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
 ; KNL-LABEL: store_v1i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL-NEXT:    kxorw %k1, %k0, %k0
@@ -1388,7 +1388,7 @@ define void @store_v1i1(<1 x i1> %c , <1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_v1i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    kxorw %k1, %k0, %k0
@@ -1396,7 +1396,7 @@ define void @store_v1i1(<1 x i1> %c , <1
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_v1i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kxnorw %k0, %k0, %k1
 ; AVX512BW-NEXT:    kxorw %k1, %k0, %k0
@@ -1405,7 +1405,7 @@ define void @store_v1i1(<1 x i1> %c , <1
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_v1i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kxnorw %k0, %k0, %k1
 ; AVX512DQ-NEXT:    kxorw %k1, %k0, %k0
@@ -1418,7 +1418,7 @@ define void @store_v1i1(<1 x i1> %c , <1
 
 define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
 ; KNL-LABEL: store_v2i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; KNL-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
@@ -1429,7 +1429,7 @@ define void @store_v2i1(<2 x i1> %c , <2
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_v2i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k0
 ; SKX-NEXT:    knotw %k0, %k0
@@ -1437,7 +1437,7 @@ define void @store_v2i1(<2 x i1> %c , <2
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_v2i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpsllq $63, %zmm0, %zmm0
@@ -1448,7 +1448,7 @@ define void @store_v2i1(<2 x i1> %c , <2
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_v2i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512DQ-NEXT:    vpsllq $63, %zmm0, %zmm0
@@ -1463,7 +1463,7 @@ define void @store_v2i1(<2 x i1> %c , <2
 
 define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
 ; KNL-LABEL: store_v4i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; KNL-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; KNL-NEXT:    vpslld $31, %ymm0, %ymm0
@@ -1474,7 +1474,7 @@ define void @store_v4i1(<4 x i1> %c , <4
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_v4i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k0
 ; SKX-NEXT:    knotw %k0, %k0
@@ -1482,7 +1482,7 @@ define void @store_v4i1(<4 x i1> %c , <4
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_v4i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpslld $31, %ymm0, %ymm0
@@ -1493,7 +1493,7 @@ define void @store_v4i1(<4 x i1> %c , <4
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_v4i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; AVX512DQ-NEXT:    vpslld $31, %ymm0, %ymm0
@@ -1508,7 +1508,7 @@ define void @store_v4i1(<4 x i1> %c , <4
 
 define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
 ; KNL-LABEL: store_v8i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -1519,7 +1519,7 @@ define void @store_v8i1(<8 x i1> %c , <8
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_v8i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0
 ; SKX-NEXT:    knotb %k0, %k0
@@ -1527,7 +1527,7 @@ define void @store_v8i1(<8 x i1> %c , <8
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_v8i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpmovw2m %zmm0, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
@@ -1537,7 +1537,7 @@ define void @store_v8i1(<8 x i1> %c , <8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_v8i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512DQ-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -1552,7 +1552,7 @@ define void @store_v8i1(<8 x i1> %c , <8
 
 define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
 ; KNL-LABEL: store_v16i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1562,7 +1562,7 @@ define void @store_v16i1(<16 x i1> %c ,
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_v16i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k0
 ; SKX-NEXT:    knotw %k0, %k0
@@ -1570,7 +1570,7 @@ define void @store_v16i1(<16 x i1> %c ,
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_v16i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpmovb2m %zmm0, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k0
@@ -1579,7 +1579,7 @@ define void @store_v16i1(<16 x i1> %c ,
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_v16i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512DQ-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1607,7 +1607,7 @@ define void @store_v16i1(<16 x i1> %c ,
 
 define void @f1(i32 %c) {
 ; CHECK-LABEL: f1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movzbl {{.*}}(%rip), %edi
 ; CHECK-NEXT:    xorl $1, %edi
 ; CHECK-NEXT:    movb %dil, {{.*}}(%rip)
@@ -1625,7 +1625,7 @@ declare void @f2(i32) #1
 
 define void @store_i16_i1(i16 %x, i1 *%y) {
 ; CHECK-LABEL: store_i16_i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movb %dil, (%rsi)
 ; CHECK-NEXT:    retq
@@ -1636,7 +1636,7 @@ define void @store_i16_i1(i16 %x, i1 *%y
 
 define void @store_i8_i1(i8 %x, i1 *%y) {
 ; CHECK-LABEL: store_i8_i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movb %dil, (%rsi)
 ; CHECK-NEXT:    retq
@@ -1647,27 +1647,27 @@ define void @store_i8_i1(i8 %x, i1 *%y)
 
 define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
 ; KNL-LABEL: test_build_vec_v32i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; KNL-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_build_vec_v32i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movl $1497715861, %eax ## imm = 0x59455495
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_build_vec_v32i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    movl $1497715861, %eax ## imm = 0x59455495
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_build_vec_v32i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512DQ-NEXT:    retq
@@ -1677,23 +1677,23 @@ define <32 x i16> @test_build_vec_v32i1(
 
 define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
 ; KNL-LABEL: test_build_vec_v64i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; KNL-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_build_vec_v64i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_build_vec_v64i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_build_vec_v64i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512DQ-NEXT:    retq
@@ -1703,7 +1703,7 @@ define <64 x i8> @test_build_vec_v64i1(<
 
 define void @ktest_1(<8 x double> %in, double * %base) {
 ; KNL-LABEL: ktest_1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vmovupd (%rdi), %zmm1
 ; KNL-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
 ; KNL-NEXT:    vmovupd 8(%rdi), %zmm1 {%k1} {z}
@@ -1711,7 +1711,7 @@ define void @ktest_1(<8 x double> %in, d
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    testb %al, %al
 ; KNL-NEXT:    je LBB41_2
-; KNL-NEXT:  ## BB#1: ## %L1
+; KNL-NEXT:  ## %bb.1: ## %L1
 ; KNL-NEXT:    vmovapd %zmm0, (%rdi)
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
@@ -1721,14 +1721,14 @@ define void @ktest_1(<8 x double> %in, d
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: ktest_1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmovupd (%rdi), %zmm1
 ; SKX-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
 ; SKX-NEXT:    vmovupd 8(%rdi), %zmm1 {%k1} {z}
 ; SKX-NEXT:    vcmpltpd %zmm1, %zmm0, %k0 {%k1}
 ; SKX-NEXT:    ktestb %k0, %k0
 ; SKX-NEXT:    je LBB41_2
-; SKX-NEXT:  ## BB#1: ## %L1
+; SKX-NEXT:  ## %bb.1: ## %L1
 ; SKX-NEXT:    vmovapd %zmm0, (%rdi)
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq
@@ -1738,7 +1738,7 @@ define void @ktest_1(<8 x double> %in, d
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: ktest_1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vmovupd (%rdi), %zmm1
 ; AVX512BW-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
 ; AVX512BW-NEXT:    vmovupd 8(%rdi), %zmm1 {%k1} {z}
@@ -1746,7 +1746,7 @@ define void @ktest_1(<8 x double> %in, d
 ; AVX512BW-NEXT:    kmovd %k0, %eax
 ; AVX512BW-NEXT:    testb %al, %al
 ; AVX512BW-NEXT:    je LBB41_2
-; AVX512BW-NEXT:  ## BB#1: ## %L1
+; AVX512BW-NEXT:  ## %bb.1: ## %L1
 ; AVX512BW-NEXT:    vmovapd %zmm0, (%rdi)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1756,14 +1756,14 @@ define void @ktest_1(<8 x double> %in, d
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: ktest_1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vmovupd (%rdi), %zmm1
 ; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
 ; AVX512DQ-NEXT:    vmovupd 8(%rdi), %zmm1 {%k1} {z}
 ; AVX512DQ-NEXT:    vcmpltpd %zmm1, %zmm0, %k0 {%k1}
 ; AVX512DQ-NEXT:    ktestb %k0, %k0
 ; AVX512DQ-NEXT:    je LBB41_2
-; AVX512DQ-NEXT:  ## BB#1: ## %L1
+; AVX512DQ-NEXT:  ## %bb.1: ## %L1
 ; AVX512DQ-NEXT:    vmovapd %zmm0, (%rdi)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
@@ -1801,7 +1801,7 @@ End:
 define void @ktest_2(<32 x float> %in, float * %base) {
 ;
 ; KNL-LABEL: ktest_2:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    .cfi_offset %rbp, -16
@@ -2083,7 +2083,7 @@ define void @ktest_2(<32 x float> %in, f
 ; KNL-NEXT:    kmovw %k0, (%rsp)
 ; KNL-NEXT:    cmpl $0, (%rsp)
 ; KNL-NEXT:    je LBB42_2
-; KNL-NEXT:  ## BB#1: ## %L1
+; KNL-NEXT:  ## %bb.1: ## %L1
 ; KNL-NEXT:    vmovaps %zmm0, (%rdi)
 ; KNL-NEXT:    vmovaps %zmm1, 64(%rdi)
 ; KNL-NEXT:    jmp LBB42_3
@@ -2097,7 +2097,7 @@ define void @ktest_2(<32 x float> %in, f
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: ktest_2:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm2
 ; SKX-NEXT:    vmovups 64(%rdi), %zmm3
 ; SKX-NEXT:    vcmpltps %zmm0, %zmm2, %k1
@@ -2111,7 +2111,7 @@ define void @ktest_2(<32 x float> %in, f
 ; SKX-NEXT:    kord %k1, %k0, %k0
 ; SKX-NEXT:    ktestd %k0, %k0
 ; SKX-NEXT:    je LBB42_2
-; SKX-NEXT:  ## BB#1: ## %L1
+; SKX-NEXT:  ## %bb.1: ## %L1
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi)
 ; SKX-NEXT:    vmovaps %zmm1, 64(%rdi)
 ; SKX-NEXT:    vzeroupper
@@ -2123,7 +2123,7 @@ define void @ktest_2(<32 x float> %in, f
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: ktest_2:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vmovups (%rdi), %zmm2
 ; AVX512BW-NEXT:    vmovups 64(%rdi), %zmm3
 ; AVX512BW-NEXT:    vcmpltps %zmm0, %zmm2, %k1
@@ -2137,7 +2137,7 @@ define void @ktest_2(<32 x float> %in, f
 ; AVX512BW-NEXT:    kord %k1, %k0, %k0
 ; AVX512BW-NEXT:    ktestd %k0, %k0
 ; AVX512BW-NEXT:    je LBB42_2
-; AVX512BW-NEXT:  ## BB#1: ## %L1
+; AVX512BW-NEXT:  ## %bb.1: ## %L1
 ; AVX512BW-NEXT:    vmovaps %zmm0, (%rdi)
 ; AVX512BW-NEXT:    vmovaps %zmm1, 64(%rdi)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2149,7 +2149,7 @@ define void @ktest_2(<32 x float> %in, f
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: ktest_2:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    pushq %rbp
 ; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512DQ-NEXT:    .cfi_offset %rbp, -16
@@ -2431,7 +2431,7 @@ define void @ktest_2(<32 x float> %in, f
 ; AVX512DQ-NEXT:    kmovw %k0, (%rsp)
 ; AVX512DQ-NEXT:    cmpl $0, (%rsp)
 ; AVX512DQ-NEXT:    je LBB42_2
-; AVX512DQ-NEXT:  ## BB#1: ## %L1
+; AVX512DQ-NEXT:  ## %bb.1: ## %L1
 ; AVX512DQ-NEXT:    vmovaps %zmm0, (%rdi)
 ; AVX512DQ-NEXT:    vmovaps %zmm1, 64(%rdi)
 ; AVX512DQ-NEXT:    jmp LBB42_3
@@ -2472,27 +2472,27 @@ End:
 
 define <8 x i64> @load_8i1(<8 x i1>* %a) {
 ; KNL-LABEL: load_8i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movzbl (%rdi), %eax
 ; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: load_8i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0
 ; SKX-NEXT:    vpmovm2q %k0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_8i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    movzbl (%rdi), %eax
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: load_8i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovb (%rdi), %k0
 ; AVX512DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; AVX512DQ-NEXT:    retq
@@ -2503,25 +2503,25 @@ define <8 x i64> @load_8i1(<8 x i1>* %a)
 
 define <16 x i32> @load_16i1(<16 x i1>* %a) {
 ; KNL-LABEL: load_16i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw (%rdi), %k1
 ; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: load_16i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovw (%rdi), %k0
 ; SKX-NEXT:    vpmovm2d %k0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_16i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovw (%rdi), %k1
 ; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: load_16i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw (%rdi), %k0
 ; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
 ; AVX512DQ-NEXT:    retq
@@ -2532,7 +2532,7 @@ define <16 x i32> @load_16i1(<16 x i1>*
 
 define <2 x i16> @load_2i1(<2 x i1>* %a) {
 ; KNL-LABEL: load_2i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movzbl (%rdi), %eax
 ; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2541,13 +2541,13 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: load_2i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0
 ; SKX-NEXT:    vpmovm2q %k0, %xmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_2i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    movzbl (%rdi), %eax
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2556,7 +2556,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: load_2i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovb (%rdi), %k0
 ; AVX512DQ-NEXT:    vpmovm2q %k0, %zmm0
 ; AVX512DQ-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2569,7 +2569,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
 
 define <4 x i16> @load_4i1(<4 x i1>* %a) {
 ; KNL-LABEL: load_4i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    movzbl (%rdi), %eax
 ; KNL-NEXT:    kmovw %eax, %k1
 ; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2579,13 +2579,13 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: load_4i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0
 ; SKX-NEXT:    vpmovm2d %k0, %xmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_4i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    movzbl (%rdi), %eax
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2595,7 +2595,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: load_4i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovb (%rdi), %k0
 ; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
 ; AVX512DQ-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2608,7 +2608,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
 
 define <32 x i16> @load_32i1(<32 x i1>* %a) {
 ; KNL-LABEL: load_32i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw (%rdi), %k1
 ; KNL-NEXT:    kmovw 2(%rdi), %k2
 ; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -2618,19 +2618,19 @@ define <32 x i16> @load_32i1(<32 x i1>*
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: load_32i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd (%rdi), %k0
 ; SKX-NEXT:    vpmovm2w %k0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_32i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd (%rdi), %k0
 ; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: load_32i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw (%rdi), %k0
 ; AVX512DQ-NEXT:    kmovw 2(%rdi), %k1
 ; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
@@ -2645,7 +2645,7 @@ define <32 x i16> @load_32i1(<32 x i1>*
 
 define <64 x i8> @load_64i1(<64 x i1>* %a) {
 ; KNL-LABEL: load_64i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw (%rdi), %k1
 ; KNL-NEXT:    kmovw 2(%rdi), %k2
 ; KNL-NEXT:    kmovw 4(%rdi), %k3
@@ -2663,19 +2663,19 @@ define <64 x i8> @load_64i1(<64 x i1>* %
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: load_64i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovq (%rdi), %k0
 ; SKX-NEXT:    vpmovm2b %k0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_64i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovq (%rdi), %k0
 ; AVX512BW-NEXT:    vpmovm2b %k0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: load_64i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw (%rdi), %k0
 ; AVX512DQ-NEXT:    kmovw 2(%rdi), %k1
 ; AVX512DQ-NEXT:    kmovw 4(%rdi), %k2
@@ -2698,7 +2698,7 @@ define <64 x i8> @load_64i1(<64 x i1>* %
 
 define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
 ; KNL-LABEL: store_8i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -2708,14 +2708,14 @@ define void @store_8i1(<8 x i1>* %a, <8
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_8i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0
 ; SKX-NEXT:    kmovb %k0, (%rdi)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_8i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpmovw2m %zmm0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -2724,7 +2724,7 @@ define void @store_8i1(<8 x i1>* %a, <8
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_8i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512DQ-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -2737,7 +2737,7 @@ define void @store_8i1(<8 x i1>* %a, <8
 
 define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
 ; KNL-LABEL: store_8i1_1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -2747,14 +2747,14 @@ define void @store_8i1_1(<8 x i1>* %a, <
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_8i1_1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0
 ; SKX-NEXT:    kmovb %k0, (%rdi)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_8i1_1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpmovw2m %zmm0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -2763,7 +2763,7 @@ define void @store_8i1_1(<8 x i1>* %a, <
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_8i1_1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512DQ-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
@@ -2777,7 +2777,7 @@ define void @store_8i1_1(<8 x i1>* %a, <
 
 define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
 ; KNL-LABEL: store_16i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -2786,14 +2786,14 @@ define void @store_16i1(<16 x i1>* %a, <
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_16i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k0
 ; SKX-NEXT:    kmovw %k0, (%rdi)
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_16i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX512BW-NEXT:    vpmovb2m %zmm0, %k0
 ; AVX512BW-NEXT:    kmovw %k0, (%rdi)
@@ -2801,7 +2801,7 @@ define void @store_16i1(<16 x i1>* %a, <
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_16i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512DQ-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -2814,7 +2814,7 @@ define void @store_16i1(<16 x i1>* %a, <
 
 define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
 ; KNL-LABEL: store_32i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; KNL-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; KNL-NEXT:    vpslld $31, %zmm1, %zmm1
@@ -2828,7 +2828,7 @@ define void @store_32i1(<32 x i1>* %a, <
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_32i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; SKX-NEXT:    vpmovb2m %ymm0, %k0
 ; SKX-NEXT:    kmovd %k0, (%rdi)
@@ -2836,7 +2836,7 @@ define void @store_32i1(<32 x i1>* %a, <
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_32i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovb2m %zmm0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, (%rdi)
@@ -2844,7 +2844,7 @@ define void @store_32i1(<32 x i1>* %a, <
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_32i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; AVX512DQ-NEXT:    vpslld $31, %zmm1, %zmm1
@@ -2862,7 +2862,7 @@ define void @store_32i1(<32 x i1>* %a, <
 
 define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
 ; KNL-LABEL: store_32i1_1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; KNL-NEXT:    vpmovdb %zmm0, %xmm0
 ; KNL-NEXT:    vpmovsxwd %ymm1, %zmm1
@@ -2879,7 +2879,7 @@ define void @store_32i1_1(<32 x i1>* %a,
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_32i1_1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %zmm0, %zmm0
 ; SKX-NEXT:    vpmovw2m %zmm0, %k0
 ; SKX-NEXT:    kmovd %k0, (%rdi)
@@ -2887,7 +2887,7 @@ define void @store_32i1_1(<32 x i1>* %a,
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_32i1_1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $15, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpmovw2m %zmm0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, (%rdi)
@@ -2895,7 +2895,7 @@ define void @store_32i1_1(<32 x i1>* %a,
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_32i1_1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; AVX512DQ-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vpmovsxwd %ymm1, %zmm1
@@ -2919,7 +2919,7 @@ define void @store_32i1_1(<32 x i1>* %a,
 define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
 ;
 ; KNL-LABEL: store_64i1:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    pushq %rbp
 ; KNL-NEXT:    .cfi_def_cfa_offset 16
 ; KNL-NEXT:    pushq %r15
@@ -3228,7 +3228,7 @@ define void @store_64i1(<64 x i1>* %a, <
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: store_64i1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %zmm0, %zmm0
 ; SKX-NEXT:    vpmovb2m %zmm0, %k0
 ; SKX-NEXT:    kmovq %k0, (%rdi)
@@ -3236,7 +3236,7 @@ define void @store_64i1(<64 x i1>* %a, <
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_64i1:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpsllw $7, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpmovb2m %zmm0, %k0
 ; AVX512BW-NEXT:    kmovq %k0, (%rdi)
@@ -3244,7 +3244,7 @@ define void @store_64i1(<64 x i1>* %a, <
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_64i1:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    pushq %rbp
 ; AVX512DQ-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512DQ-NEXT:    pushq %r15
@@ -3557,7 +3557,7 @@ define void @store_64i1(<64 x i1>* %a, <
 
 define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
 ; KNL-LABEL: test_bitcast_v8i1_zext:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; KNL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
@@ -3567,7 +3567,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_bitcast_v8i1_zext:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; SKX-NEXT:    kmovb %k0, %eax
@@ -3576,7 +3576,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_bitcast_v8i1_zext:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT:    kmovd %k0, %eax
@@ -3586,7 +3586,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_bitcast_v8i1_zext:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; AVX512DQ-NEXT:    kmovb %k0, %eax
@@ -3603,7 +3603,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 
 define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
 ; CHECK-LABEL: test_bitcast_v16i1_zext:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -3619,7 +3619,7 @@ define i32 @test_bitcast_v16i1_zext(<16
 
 define i16 @test_v16i1_add(i16 %x, i16 %y) {
 ; KNL-LABEL: test_v16i1_add:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kxorw %k1, %k0, %k0
@@ -3628,7 +3628,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i1_add:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    kxorw %k1, %k0, %k0
@@ -3637,7 +3637,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_v16i1_add:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kmovd %esi, %k1
 ; AVX512BW-NEXT:    kxorw %k1, %k0, %k0
@@ -3646,7 +3646,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_v16i1_add:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-NEXT:    kxorw %k1, %k0, %k0
@@ -3662,7 +3662,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
 
 define i16 @test_v16i1_sub(i16 %x, i16 %y) {
 ; KNL-LABEL: test_v16i1_sub:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kxorw %k1, %k0, %k0
@@ -3671,7 +3671,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i1_sub:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    kxorw %k1, %k0, %k0
@@ -3680,7 +3680,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_v16i1_sub:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kmovd %esi, %k1
 ; AVX512BW-NEXT:    kxorw %k1, %k0, %k0
@@ -3689,7 +3689,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_v16i1_sub:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-NEXT:    kxorw %k1, %k0, %k0
@@ -3705,7 +3705,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
 
 define i16 @test_v16i1_mul(i16 %x, i16 %y) {
 ; KNL-LABEL: test_v16i1_mul:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kandw %k1, %k0, %k0
@@ -3714,7 +3714,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v16i1_mul:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    kandw %k1, %k0, %k0
@@ -3723,7 +3723,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_v16i1_mul:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kmovd %esi, %k1
 ; AVX512BW-NEXT:    kandw %k1, %k0, %k0
@@ -3732,7 +3732,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_v16i1_mul:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-NEXT:    kandw %k1, %k0, %k0
@@ -3748,7 +3748,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
 
 define i8 @test_v8i1_add(i8 %x, i8 %y) {
 ; KNL-LABEL: test_v8i1_add:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kxorw %k1, %k0, %k0
@@ -3757,7 +3757,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i1_add:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    kxorb %k1, %k0, %k0
@@ -3766,7 +3766,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_v8i1_add:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kmovd %esi, %k1
 ; AVX512BW-NEXT:    kxorw %k1, %k0, %k0
@@ -3775,7 +3775,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_v8i1_add:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-NEXT:    kxorb %k1, %k0, %k0
@@ -3791,7 +3791,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
 
 define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 ; KNL-LABEL: test_v8i1_sub:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kxorw %k1, %k0, %k0
@@ -3800,7 +3800,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i1_sub:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    kxorb %k1, %k0, %k0
@@ -3809,7 +3809,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_v8i1_sub:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kmovd %esi, %k1
 ; AVX512BW-NEXT:    kxorw %k1, %k0, %k0
@@ -3818,7 +3818,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_v8i1_sub:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-NEXT:    kxorb %k1, %k0, %k0
@@ -3834,7 +3834,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 
 define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 ; KNL-LABEL: test_v8i1_mul:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    kmovw %esi, %k1
 ; KNL-NEXT:    kandw %k1, %k0, %k0
@@ -3843,7 +3843,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_v8i1_mul:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0
 ; SKX-NEXT:    kmovd %esi, %k1
 ; SKX-NEXT:    kandb %k1, %k0, %k0
@@ -3852,7 +3852,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 ; SKX-NEXT:    retq
 ;
 ; AVX512BW-LABEL: test_v8i1_mul:
-; AVX512BW:       ## BB#0:
+; AVX512BW:       ## %bb.0:
 ; AVX512BW-NEXT:    kmovd %edi, %k0
 ; AVX512BW-NEXT:    kmovd %esi, %k1
 ; AVX512BW-NEXT:    kandw %k1, %k0, %k0
@@ -3861,7 +3861,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: test_v8i1_mul:
-; AVX512DQ:       ## BB#0:
+; AVX512DQ:       ## %bb.0:
 ; AVX512DQ-NEXT:    kmovw %edi, %k0
 ; AVX512DQ-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-NEXT:    kandb %k1, %k0, %k0

Modified: llvm/trunk/test/CodeGen/X86/avx512-mask-spills.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mask-spills.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mask-spills.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mask-spills.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 declare void @f()
 define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_4i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    vpcmpnleud %xmm1, %xmm0, %k0
@@ -26,7 +26,7 @@ define <4 x i1> @test_4i1(<4 x i32> %a,
 
 define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: test_8i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    vpcmpnleud %ymm1, %ymm0, %k0
@@ -49,7 +49,7 @@ define <8 x i1> @test_8i1(<8 x i32> %a,
 
 define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_16i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
@@ -71,7 +71,7 @@ define <16 x i1> @test_16i1(<16 x i32> %
 
 define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
 ; CHECK-LABEL: test_32i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    vpcmpnleuw %zmm1, %zmm0, %k0
@@ -93,7 +93,7 @@ define <32 x i1> @test_32i1(<32 x i16> %
 
 define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
 ; CHECK-LABEL: test_64i1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    vpcmpnleub %zmm1, %zmm0, %k0

Modified: llvm/trunk/test/CodeGen/X86/avx512-mask-zext-bugfix.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mask-zext-bugfix.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mask-zext-bugfix.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mask-zext-bugfix.ll Mon Dec  4 09:18:51 2017
@@ -16,7 +16,7 @@ declare i32 @check_mask16(i16 zeroext %r
 ; Function Attrs: nounwind uwtable
 define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,i8* %arraydecay,i8* %fname){
 ; CHECK-LABEL: test_xmm:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    subq $56, %rsp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    movl $2, %esi

Modified: llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-masked-memop-64-32.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
 ; AVX512-LABEL: test1:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
 ; AVX512-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z}
@@ -16,7 +16,7 @@ define <16 x i32> @test1(<16 x i32> %tri
 
 define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
 ; AVX512-LABEL: test2:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
 ; AVX512-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z}
@@ -28,7 +28,7 @@ define <16 x i32> @test2(<16 x i32> %tri
 
 define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
 ; AVX512-LABEL: test3:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vmovdqu32 %zmm1, (%rdi) {%k1}
@@ -41,7 +41,7 @@ define void @test3(<16 x i32> %trigger,
 
 define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
 ; AVX512-LABEL: test4:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vblendmps (%rdi), %zmm1, %zmm0 {%k1}
@@ -53,7 +53,7 @@ define <16 x float> @test4(<16 x i32> %t
 
 define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
 ; AVX512-LABEL: test13:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vmovups %zmm1, (%rdi) {%k1}
@@ -66,7 +66,7 @@ define void @test13(<16 x i32> %trigger,
 
 define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
 ; AVX512-LABEL: one_mask_bit_set5:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
 ; AVX512-NEXT:    vmovlps %xmm0, 48(%rdi)
 ; AVX512-NEXT:    vzeroupper
@@ -78,7 +78,7 @@ define void @one_mask_bit_set5(<8 x doub
 define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
 ;
 ; AVX512-LABEL: load_one_mask_bit_set5:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm1
 ; AVX512-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512-NEXT:    vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
@@ -98,7 +98,7 @@ declare <16 x i32*> @llvm.masked.load.v1
 
 define <16 x i32*> @test23(<16 x i32*> %trigger, <16 x i32*>* %addr) {
 ; AVX512-LABEL: test23:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpeqq %zmm2, %zmm0, %k1
 ; AVX512-NEXT:    vpcmpeqq %zmm2, %zmm1, %k2
@@ -116,7 +116,7 @@ declare <16 x %mystruct*> @llvm.masked.l
 
 define <16 x %mystruct*> @test24(<16 x i1> %mask, <16 x %mystruct*>* %addr) {
 ; AVX512F-LABEL: test24:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
@@ -126,7 +126,7 @@ define <16 x %mystruct*> @test24(<16 x i
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test24:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1
 ; SKX-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z}
@@ -139,7 +139,7 @@ define <16 x %mystruct*> @test24(<16 x i
 
 define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %src0)  {
 ; AVX512F-LABEL: test_store_16i64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
@@ -150,7 +150,7 @@ define void @test_store_16i64(<16 x i64>
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test_store_16i64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1
 ; SKX-NEXT:    vmovdqu64 %zmm1, (%rdi) {%k1}
@@ -165,7 +165,7 @@ declare void @llvm.masked.store.v16i64.p
 
 define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x double> %src0)  {
 ; AVX512F-LABEL: test_store_16f64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
@@ -176,7 +176,7 @@ define void @test_store_16f64(<16 x doub
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test_store_16f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1
 ; SKX-NEXT:    vmovupd %zmm1, (%rdi) {%k1}
@@ -191,7 +191,7 @@ declare void @llvm.masked.store.v16f64.p
 
 define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %src0)  {
 ; AVX512F-LABEL: test_load_16i64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
@@ -201,7 +201,7 @@ define <16 x i64> @test_load_16i64(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test_load_16i64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1
 ; SKX-NEXT:    vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
@@ -215,7 +215,7 @@ declare <16 x i64> @llvm.masked.load.v16
 
 define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x double> %src0)  {
 ; AVX512F-LABEL: test_load_16f64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT:    vpslld $31, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1
@@ -225,7 +225,7 @@ define <16 x double> @test_load_16f64(<1
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test_load_16f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1
 ; SKX-NEXT:    vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -239,7 +239,7 @@ declare <16 x double> @llvm.masked.load.
 
 define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0)  {
 ; AVX512F-LABEL: test_load_32f64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm5
 ; AVX512F-NEXT:    vpmovsxbd %xmm5, %zmm5
 ; AVX512F-NEXT:    vpslld $31, %zmm5, %zmm5
@@ -257,7 +257,7 @@ define <32 x double> @test_load_32f64(<3
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test_load_32f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; SKX-NEXT:    vpmovb2m %ymm0, %k1
 ; SKX-NEXT:    vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
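
The change repeated throughout these hunks is purely cosmetic: the basic-block comment the AsmPrinter emits at the top of each function switches from the old "BB#N" spelling to "%bb.N", with the IR block name still appended where one exists (e.g. "# %bb.0: # %entry"). As a minimal sketch of what an affected test looks like after the rename -- the function below is hypothetical, not part of this commit:

define i32 @sample(i32 %x) {
; CHECK-LABEL: sample:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    leal 1(%rdi), %eax
; CHECK-NEXT:    retq
  %r = add i32 %x, 1
  ret i32 %r
}

Before this commit the second CHECK line would have matched "## BB#0:" instead.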

Modified: llvm/trunk/test/CodeGen/X86/avx512-masked_memop-16-8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-masked_memop-16-8.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-masked_memop-16-8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-masked_memop-16-8.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
 ; CHECK-LABEL: test_mask_load_16xi8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovb2m %xmm0, %k1
 ; CHECK-NEXT:    vmovdqu8 (%rdi), %xmm0 {%k1} {z}
@@ -17,7 +17,7 @@ declare <16 x i8> @llvm.masked.load.v16i
 
 define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
 ; CHECK-LABEL: test_mask_load_32xi8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; CHECK-NEXT:    vpmovb2m %ymm0, %k1
 ; CHECK-NEXT:    vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
@@ -29,7 +29,7 @@ declare <32 x i8> @llvm.masked.load.v32i
 
 define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
 ; CHECK-LABEL: test_mask_load_64xi8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %zmm0, %zmm0
 ; CHECK-NEXT:    vpmovb2m %zmm0, %k1
 ; CHECK-NEXT:    vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
@@ -41,7 +41,7 @@ declare <64 x i8> @llvm.masked.load.v64i
 
 define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
 ; CHECK-LABEL: test_mask_load_8xi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovw2m %xmm0, %k1
 ; CHECK-NEXT:    vmovdqu16 (%rdi), %xmm0 {%k1} {z}
@@ -53,7 +53,7 @@ declare <8 x i16> @llvm.masked.load.v8i1
 
 define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
 ; CHECK-LABEL: test_mask_load_16xi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovb2m %xmm0, %k1
 ; CHECK-NEXT:    vmovdqu16 (%rdi), %ymm0 {%k1} {z}
@@ -65,7 +65,7 @@ declare <16 x i16> @llvm.masked.load.v16
 
 define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
 ; CHECK-LABEL: test_mask_load_32xi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; CHECK-NEXT:    vpmovb2m %ymm0, %k1
 ; CHECK-NEXT:    vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
@@ -77,7 +77,7 @@ declare <32 x i16> @llvm.masked.load.v32
 
 define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
 ; CHECK-LABEL: test_mask_store_16xi8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovb2m %xmm0, %k1
 ; CHECK-NEXT:    vmovdqu8 %xmm1, (%rdi) {%k1}
@@ -89,7 +89,7 @@ declare void @llvm.masked.store.v16i8.p0
 
 define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
 ; CHECK-LABEL: test_mask_store_32xi8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; CHECK-NEXT:    vpmovb2m %ymm0, %k1
 ; CHECK-NEXT:    vmovdqu8 %ymm1, (%rdi) {%k1}
@@ -102,7 +102,7 @@ declare void @llvm.masked.store.v32i8.p0
 
 define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
 ; CHECK-LABEL: test_mask_store_64xi8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %zmm0, %zmm0
 ; CHECK-NEXT:    vpmovb2m %zmm0, %k1
 ; CHECK-NEXT:    vmovdqu8 %zmm1, (%rdi) {%k1}
@@ -115,7 +115,7 @@ declare void @llvm.masked.store.v64i8.p0
 
 define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
 ; CHECK-LABEL: test_mask_store_8xi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovw2m %xmm0, %k1
 ; CHECK-NEXT:    vmovdqu16 %xmm1, (%rdi) {%k1}
@@ -127,7 +127,7 @@ declare void @llvm.masked.store.v8i16.p0
 
 define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
 ; CHECK-LABEL: test_mask_store_16xi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovb2m %xmm0, %k1
 ; CHECK-NEXT:    vmovdqu16 %ymm1, (%rdi) {%k1}
@@ -140,7 +140,7 @@ declare void @llvm.masked.store.v16i16.p
 
 define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
 ; CHECK-LABEL: test_mask_store_32xi16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; CHECK-NEXT:    vpmovb2m %ymm0, %k1
 ; CHECK-NEXT:    vmovdqu16 %zmm1, (%rdi) {%k1}

Modified: llvm/trunk/test/CodeGen/X86/avx512-memfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-memfold.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-memfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-memfold.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vcmpunordss (%rdi), %xmm0, %k0 {%k1}
 ; CHECK-NEXT:    kmovw %k0, %eax
@@ -21,7 +21,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(
 
 define <4 x float> @test_mask_max_ss(<4 x float> %a, float* %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_max_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -37,7 +37,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_maskz_add_ss(<4 x float> %a, float* %b, i8 %mask) {
 ; CHECK-LABEL: test_maskz_add_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -56,7 +56,7 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, double* %c, i8 %mask){
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -71,7 +71,7 @@ define <2 x double> @test_int_x86_avx512
 ; TODO: We shouldn't fold the load twice here.
 define <4 x float> @test_mask_add_ss_double_use(<4 x float> %a, float* %b, i8 %mask, <4 x float> %c) {
 ; CHECK-LABEL: test_mask_add_ss_double_use:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    kmovw %esi, %k1
 ; CHECK-NEXT:    vaddss %xmm2, %xmm0, %xmm1 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/avx512-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mov.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mov.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test1(float %x) {
 ; CHECK-LABEL: test1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovd %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = bitcast float %x to i32
@@ -12,7 +12,7 @@ define i32 @test1(float %x) {
 
 define <4 x i32> @test2(i32 %x) {
 ; CHECK-LABEL: test2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = insertelement <4 x i32>undef, i32 %x, i32 0
@@ -21,7 +21,7 @@ define <4 x i32> @test2(i32 %x) {
 
 define <2 x i64> @test3(i64 %x) {
 ; CHECK-LABEL: test3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %rdi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = insertelement <2 x i64>undef, i64 %x, i32 0
@@ -30,7 +30,7 @@ define <2 x i64> @test3(i64 %x) {
 
 define <4 x i32> @test4(i32* %x) {
 ; CHECK-LABEL: test4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
 ; CHECK-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -41,7 +41,7 @@ define <4 x i32> @test4(i32* %x) {
 
 define void @test5(float %x, float* %y) {
 ; CHECK-LABEL: test5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    store float %x, float* %y, align 4
@@ -50,7 +50,7 @@ define void @test5(float %x, float* %y)
 
 define void @test6(double %x, double* %y) {
 ; CHECK-LABEL: test6:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    store double %x, double* %y, align 8
@@ -59,7 +59,7 @@ define void @test6(double %x, double* %y
 
 define float @test7(i32* %x) {
 ; CHECK-LABEL: test7:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
 ; CHECK-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -70,7 +70,7 @@ define float @test7(i32* %x) {
 
 define i32 @test8(<4 x i32> %x) {
 ; CHECK-LABEL: test8:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovd %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = extractelement <4 x i32> %x, i32 0
@@ -79,7 +79,7 @@ define i32 @test8(<4 x i32> %x) {
 
 define i64 @test9(<2 x i64> %x) {
 ; CHECK-LABEL: test9:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = extractelement <2 x i64> %x, i32 0
@@ -88,7 +88,7 @@ define i64 @test9(<2 x i64> %x) {
 
 define <4 x i32> @test10(i32* %x) {
 ; CHECK-LABEL: test10:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
 ; CHECK-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -99,7 +99,7 @@ define <4 x i32> @test10(i32* %x) {
 
 define <4 x float> @test11(float* %x) {
 ; CHECK-LABEL: test11:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
 ; CHECK-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -110,7 +110,7 @@ define <4 x float> @test11(float* %x) {
 
 define <2 x double> @test12(double* %x) {
 ; CHECK-LABEL: test12:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
 ; CHECK-NEXT:    ## xmm0 = mem[0],zero
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -121,7 +121,7 @@ define <2 x double> @test12(double* %x)
 
 define <2 x i64> @test13(i64 %x) {
 ; CHECK-LABEL: test13:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %rdi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = insertelement <2 x i64>zeroinitializer, i64 %x, i32 0
@@ -130,7 +130,7 @@ define <2 x i64> @test13(i64 %x) {
 
 define <4 x i32> @test14(i32 %x) {
 ; CHECK-LABEL: test14:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
    %res = insertelement <4 x i32>zeroinitializer, i32 %x, i32 0
@@ -139,7 +139,7 @@ define <4 x i32> @test14(i32 %x) {
 
 define <4 x i32> @test15(i32* %x) {
 ; CHECK-LABEL: test15:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
 ; CHECK-NEXT:    ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
@@ -150,7 +150,7 @@ define <4 x i32> @test15(i32* %x) {
 
 define <16 x i32> @test16(i8 * %addr) {
 ; CHECK-LABEL: test16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x i32>*
@@ -160,7 +160,7 @@ define <16 x i32> @test16(i8 * %addr) {
 
 define <16 x i32> @test17(i8 * %addr) {
 ; CHECK-LABEL: test17:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x i32>*
@@ -170,7 +170,7 @@ define <16 x i32> @test17(i8 * %addr) {
 
 define void @test18(i8 * %addr, <8 x i64> %data) {
 ; CHECK-LABEL: test18:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x i64>*
@@ -180,7 +180,7 @@ define void @test18(i8 * %addr, <8 x i64
 
 define void @test19(i8 * %addr, <16 x i32> %data) {
 ; CHECK-LABEL: test19:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x i32>*
@@ -190,7 +190,7 @@ define void @test19(i8 * %addr, <16 x i3
 
 define void @test20(i8 * %addr, <16 x i32> %data) {
 ; CHECK-LABEL: test20:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x i32>*
@@ -200,7 +200,7 @@ define void @test20(i8 * %addr, <16 x i3
 
 define  <8 x i64> @test21(i8 * %addr) {
 ; CHECK-LABEL: test21:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x i64>*
@@ -210,7 +210,7 @@ define  <8 x i64> @test21(i8 * %addr) {
 
 define void @test22(i8 * %addr, <8 x i64> %data) {
 ; CHECK-LABEL: test22:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x i64>*
@@ -220,7 +220,7 @@ define void @test22(i8 * %addr, <8 x i64
 
 define <8 x i64> @test23(i8 * %addr) {
 ; CHECK-LABEL: test23:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x i64>*
@@ -230,7 +230,7 @@ define <8 x i64> @test23(i8 * %addr) {
 
 define void @test24(i8 * %addr, <8 x double> %data) {
 ; CHECK-LABEL: test24:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x double>*
@@ -240,7 +240,7 @@ define void @test24(i8 * %addr, <8 x dou
 
 define <8 x double> @test25(i8 * %addr) {
 ; CHECK-LABEL: test25:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x double>*
@@ -250,7 +250,7 @@ define <8 x double> @test25(i8 * %addr)
 
 define void @test26(i8 * %addr, <16 x float> %data) {
 ; CHECK-LABEL: test26:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x29,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x float>*
@@ -260,7 +260,7 @@ define void @test26(i8 * %addr, <16 x fl
 
 define <16 x float> @test27(i8 * %addr) {
 ; CHECK-LABEL: test27:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x float>*
@@ -270,7 +270,7 @@ define <16 x float> @test27(i8 * %addr)
 
 define void @test28(i8 * %addr, <8 x double> %data) {
 ; CHECK-LABEL: test28:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x double>*
@@ -280,7 +280,7 @@ define void @test28(i8 * %addr, <8 x dou
 
 define <8 x double> @test29(i8 * %addr) {
 ; CHECK-LABEL: test29:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <8 x double>*
@@ -290,7 +290,7 @@ define <8 x double> @test29(i8 * %addr)
 
 define void @test30(i8 * %addr, <16 x float> %data) {
 ; CHECK-LABEL: test30:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups %zmm0, (%rdi) ## encoding: [0x62,0xf1,0x7c,0x48,0x11,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x float>*
@@ -300,7 +300,7 @@ define void @test30(i8 * %addr, <16 x fl
 
 define <16 x float> @test31(i8 * %addr) {
 ; CHECK-LABEL: test31:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x10,0x07]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %vaddr = bitcast i8* %addr to <16 x float>*
@@ -310,7 +310,7 @@ define <16 x float> @test31(i8 * %addr)
 
 define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; CHECK-LABEL: test32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0x6f,0x07]
@@ -324,7 +324,7 @@ define <16 x i32> @test32(i8 * %addr, <1
 
 define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; CHECK-LABEL: test33:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x49,0x6f,0x07]
@@ -338,7 +338,7 @@ define <16 x i32> @test33(i8 * %addr, <1
 
 define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
 ; CHECK-LABEL: test34:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x6f,0x07]
@@ -352,7 +352,7 @@ define <16 x i32> @test34(i8 * %addr, <1
 
 define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
 ; CHECK-LABEL: test35:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xc9,0x6f,0x07]
@@ -366,7 +366,7 @@ define <16 x i32> @test35(i8 * %addr, <1
 
 define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
 ; CHECK-LABEL: test36:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x6f,0x07]
@@ -380,7 +380,7 @@ define <8 x i64> @test36(i8 * %addr, <8
 
 define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
 ; CHECK-LABEL: test37:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x49,0x6f,0x07]
@@ -394,7 +394,7 @@ define <8 x i64> @test37(i8 * %addr, <8
 
 define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
 ; CHECK-LABEL: test38:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x6f,0x07]
@@ -408,7 +408,7 @@ define <8 x i64> @test38(i8 * %addr, <8
 
 define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
 ; CHECK-LABEL: test39:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x04]
 ; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0xc9,0x6f,0x07]
@@ -422,7 +422,7 @@ define <8 x i64> @test39(i8 * %addr, <8
 
 define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
 ; CHECK-LABEL: test40:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
 ; CHECK-NEXT:    vcmpneq_oqps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x0c]
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x28,0x07]
@@ -436,7 +436,7 @@ define <16 x float> @test40(i8 * %addr,
 
 define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
 ; CHECK-LABEL: test41:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
 ; CHECK-NEXT:    vcmpneq_oqps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x0c]
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x10,0x07]
@@ -450,7 +450,7 @@ define <16 x float> @test41(i8 * %addr,
 
 define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
 ; CHECK-LABEL: test42:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
 ; CHECK-NEXT:    vcmpneq_oqps %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x48,0xc2,0xc9,0x0c]
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x28,0x07]
@@ -464,7 +464,7 @@ define <16 x float> @test42(i8 * %addr,
 
 define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
 ; CHECK-LABEL: test43:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
 ; CHECK-NEXT:    vcmpneq_oqps %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x48,0xc2,0xc9,0x0c]
 ; CHECK-NEXT:    vmovups (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x10,0x07]
@@ -478,7 +478,7 @@ define <16 x float> @test43(i8 * %addr,
 
 define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
 ; CHECK-LABEL: test44:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x57,0xd2]
 ; CHECK-NEXT:    vcmpneq_oqpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x0c]
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x28,0x07]
@@ -492,7 +492,7 @@ define <8 x double> @test44(i8 * %addr,
 
 define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
 ; CHECK-LABEL: test45:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x57,0xd2]
 ; CHECK-NEXT:    vcmpneq_oqpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x0c]
 ; CHECK-NEXT:    vmovupd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x10,0x07]
@@ -506,7 +506,7 @@ define <8 x double> @test45(i8 * %addr,
 
 define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
 ; CHECK-LABEL: test46:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
 ; CHECK-NEXT:    vcmpneq_oqpd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc9,0x0c]
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x28,0x07]
@@ -520,7 +520,7 @@ define <8 x double> @test46(i8 * %addr,
 
 define <8 x double> @test47(i8 * %addr, <8 x double> %mask1) {
 ; CHECK-LABEL: test47:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
 ; CHECK-NEXT:    vcmpneq_oqpd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc9,0x0c]
 ; CHECK-NEXT:    vmovupd (%rdi), %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x10,0x07]
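
The bracketed byte lists above ("## encoding: [0x62,...]") are printed when llc is asked to show MC encodings; the block-label rename does not touch them, which is why only the "%bb.0" lines change in this file. A RUN line for a test of this shape looks roughly like the following -- the triple and CPU here are recalled from memory, not copied from the file:

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx --show-mc-encoding | FileCheck %s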

Modified: llvm/trunk/test/CodeGen/X86/avx512-pmovxrm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-pmovxrm.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-pmovxrm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-pmovxrm.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define <32 x i16> @test_llvm_x86_avx512_pmovsxbw(<32 x i8>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovsxbw:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovsxbw (%eax), %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovsxbw:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovsxbw (%rdi), %zmm0
 ; X64-NEXT:    retq
   %1 = load <32 x i8>, <32 x i8>* %a, align 1
@@ -20,13 +20,13 @@ define <32 x i16> @test_llvm_x86_avx512_
 
 define <16 x i32> @test_llvm_x86_avx512_pmovsxbd(<16 x i8>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovsxbd:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovsxbd (%eax), %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovsxbd:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovsxbd (%rdi), %zmm0
 ; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -36,13 +36,13 @@ define <16 x i32> @test_llvm_x86_avx512_
 
 define <8 x i64> @test_llvm_x86_avx512_pmovsxbq(<16 x i8>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovsxbq:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovsxbq (%eax), %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovsxbq:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovsxbq (%rdi), %zmm0
 ; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -53,13 +53,13 @@ define <8 x i64> @test_llvm_x86_avx512_p
 
 define <16 x i32> @test_llvm_x86_avx512_pmovsxwd(<16 x i16>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovsxwd:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovsxwd (%eax), %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovsxwd:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovsxwd (%rdi), %zmm0
 ; X64-NEXT:    retq
   %1 = load <16 x i16>, <16 x i16>* %a, align 1
@@ -69,13 +69,13 @@ define <16 x i32> @test_llvm_x86_avx512_
 
 define <8 x i64> @test_llvm_x86_avx512_pmovsxwq(<8 x i16>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovsxwq:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovsxwq (%eax), %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovsxwq:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovsxwq (%rdi), %zmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -85,13 +85,13 @@ define <8 x i64> @test_llvm_x86_avx512_p
 
 define <8 x i64> @test_llvm_x86_avx512_pmovsxdq(<8 x i32>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovsxdq:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovsxdq (%eax), %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovsxdq:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovsxdq (%rdi), %zmm0
 ; X64-NEXT:    retq
   %1 = load <8 x i32>, <8 x i32>* %a, align 1
@@ -101,13 +101,13 @@ define <8 x i64> @test_llvm_x86_avx512_p
 
 define <32 x i16> @test_llvm_x86_avx512_pmovzxbw(<32 x i8>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovzxbw:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovzxbw {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovzxbw:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovzxbw {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
 ; X64-NEXT:    retq
   %1 = load <32 x i8>, <32 x i8>* %a, align 1
@@ -117,13 +117,13 @@ define <32 x i16> @test_llvm_x86_avx512_
 
 define <16 x i32> @test_llvm_x86_avx512_pmovzxbd(<16 x i8>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovzxbd:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovzxbd:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -133,13 +133,13 @@ define <16 x i32> @test_llvm_x86_avx512_
 
 define <8 x i64> @test_llvm_x86_avx512_pmovzxbq(<16 x i8>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovzxbq:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovzxbq:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -150,13 +150,13 @@ define <8 x i64> @test_llvm_x86_avx512_p
 
 define <16 x i32> @test_llvm_x86_avx512_pmovzxwd(<16 x i16>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovzxwd:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovzxwd:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; X64-NEXT:    retq
   %1 = load <16 x i16>, <16 x i16>* %a, align 1
@@ -166,13 +166,13 @@ define <16 x i32> @test_llvm_x86_avx512_
 
 define <8 x i64> @test_llvm_x86_avx512_pmovzxwq(<8 x i16>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovzxwq:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovzxwq {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovzxwq:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovzxwq {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -182,13 +182,13 @@ define <8 x i64> @test_llvm_x86_avx512_p
 
 define <8 x i64> @test_llvm_x86_avx512_pmovzxdq(<8 x i32>* %a) {
 ; X32-LABEL: test_llvm_x86_avx512_pmovzxdq:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpmovzxdq {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx512_pmovzxdq:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpmovzxdq {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; X64-NEXT:    retq
   %1 = load <8 x i32>, <8 x i32>* %a, align 1

Modified: llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 ; Test regcall when receiving arguments of v64i1 type
 define x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1, <64 x i1> %x2, <64 x i1> %x3, <64 x i1> %x4, <64 x i1> %x5, <64 x i1> %x6, <64 x i1> %x7, <64 x i1> %x8, <64 x i1> %x9, <64 x i1> %x10, <64 x i1> %x11, <64 x i1> %x12)  {
 ; X32-LABEL: test_argv64i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-8, %esp
@@ -50,7 +50,7 @@ define x86_regcallcc i64 @test_argv64i1(
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argv64i1:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    addq %rcx, %rax
 ; WIN64-NEXT:    addq %rdx, %rax
 ; WIN64-NEXT:    addq %rdi, %rax
@@ -66,7 +66,7 @@ define x86_regcallcc i64 @test_argv64i1(
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argv64i1:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    addq %rcx, %rax
 ; LINUXOSX64-NEXT:    addq %rdx, %rax
 ; LINUXOSX64-NEXT:    addq %rdi, %rax
@@ -111,7 +111,7 @@ define x86_regcallcc i64 @test_argv64i1(
 ; Test regcall when passing arguments of v64i1 type
 define i64 @caller_argv64i1() #0 {
 ; X32-LABEL: caller_argv64i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    subl $88, %esp
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [2,1,2,1]
@@ -132,7 +132,7 @@ define i64 @caller_argv64i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_argv64i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %r15
 ; WIN64-NEXT:    .seh_pushreg 15
 ; WIN64-NEXT:    pushq %r14
@@ -178,7 +178,7 @@ define i64 @caller_argv64i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_argv64i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %r15
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    pushq %r14
@@ -229,13 +229,13 @@ entry:
 ; Test regcall when returning v64i1 type
 define x86_regcallcc <64 x i1> @test_retv64i1()  {
 ; X32-LABEL: test_retv64i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl $2, %eax
 ; X32-NEXT:    movl $1, %ecx
 ; X32-NEXT:    retl
 ;
 ; CHECK64-LABEL: test_retv64i1:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movabsq $4294967298, %rax # imm = 0x100000002
 ; CHECK64-NEXT:    retq
   %a = bitcast i64 4294967298 to <64 x i1>
@@ -245,7 +245,7 @@ define x86_regcallcc <64 x i1> @test_ret
 ; Test regcall when processing result of v64i1 type
 define <64 x i1> @caller_retv64i1() #0 {
 ; X32-LABEL: caller_retv64i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    calll _test_retv64i1
 ; X32-NEXT:    kmovd %eax, %k0
 ; X32-NEXT:    kmovd %ecx, %k1
@@ -254,7 +254,7 @@ define <64 x i1> @caller_retv64i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_retv64i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -280,7 +280,7 @@ define <64 x i1> @caller_retv64i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_retv64i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    callq test_retv64i1
@@ -297,7 +297,7 @@ entry:
 declare i32 @test_argv32i1helper(<32 x i1> %x0, <32 x i1> %x1, <32 x i1> %x2)
 define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1> %x2)  {
 ; X32-LABEL: test_argv32i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $72, %esp
 ; X32-NEXT:    vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
@@ -324,7 +324,7 @@ define x86_regcallcc i32 @test_argv32i1(
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argv32i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %r11
 ; WIN64-NEXT:    .seh_pushreg 11
 ; WIN64-NEXT:    pushq %r10
@@ -356,7 +356,7 @@ define x86_regcallcc i32 @test_argv32i1(
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_argv32i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $128, %rsp
@@ -408,7 +408,7 @@ entry:
 ; Test regcall when passing arguments of v32i1 type
 define i32 @caller_argv32i1() #0 {
 ; X32-LABEL: caller_argv32i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl $1, %eax
 ; X32-NEXT:    movl $1, %ecx
 ; X32-NEXT:    movl $1, %edx
@@ -416,7 +416,7 @@ define i32 @caller_argv32i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_argv32i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -443,7 +443,7 @@ define i32 @caller_argv32i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_argv32i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    movl $1, %eax
@@ -461,12 +461,12 @@ entry:
 ; Test regcall when returning v32i1 type
 define x86_regcallcc <32 x i1> @test_retv32i1()  {
 ; X32-LABEL: test_retv32i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl $1, %eax
 ; X32-NEXT:    retl
 ;
 ; CHECK64-LABEL: test_retv32i1:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl $1, %eax
 ; CHECK64-NEXT:    retq
   %a = bitcast i32 1 to <32 x i1>
@@ -476,13 +476,13 @@ define x86_regcallcc <32 x i1> @test_ret
 ; Test regcall when processing result of v32i1 type
 define i32 @caller_retv32i1() #0 {
 ; X32-LABEL: caller_retv32i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    calll _test_retv32i1
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_retv32i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -507,7 +507,7 @@ define i32 @caller_retv32i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_retv32i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    callq test_retv32i1
@@ -525,7 +525,7 @@ entry:
 declare i16 @test_argv16i1helper(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> %x2)
 define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> %x2)  {
 ; X32-LABEL: test_argv16i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $72, %esp
 ; X32-NEXT:    vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
@@ -552,7 +552,7 @@ define x86_regcallcc i16 @test_argv16i1(
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argv16i1:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %r11
 ; WIN64-NEXT:    .seh_pushreg 11
 ; WIN64-NEXT:    pushq %r10
@@ -584,7 +584,7 @@ define x86_regcallcc i16 @test_argv16i1(
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_argv16i1:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $128, %rsp
@@ -635,7 +635,7 @@ define x86_regcallcc i16 @test_argv16i1(
 ; Test regcall when passing arguments of v16i1 type
 define i16 @caller_argv16i1() #0 {
 ; X32-LABEL: caller_argv16i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl $1, %eax
 ; X32-NEXT:    movl $1, %ecx
 ; X32-NEXT:    movl $1, %edx
@@ -643,7 +643,7 @@ define i16 @caller_argv16i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_argv16i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -670,7 +670,7 @@ define i16 @caller_argv16i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_argv16i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    movl $1, %eax
@@ -688,12 +688,12 @@ entry:
 ; Test regcall when returning v16i1 type
 define x86_regcallcc <16 x i1> @test_retv16i1()  {
 ; X32-LABEL: test_retv16i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movw $1, %ax
 ; X32-NEXT:    retl
 ;
 ; CHECK64-LABEL: test_retv16i1:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movw $1, %ax
 ; CHECK64-NEXT:    retq
   %a = bitcast i16 1 to <16 x i1>
@@ -703,7 +703,7 @@ define x86_regcallcc <16 x i1> @test_ret
 ; Test regcall when processing result of v16i1 type
 define i16 @caller_retv16i1() #0 {
 ; X32-LABEL: caller_retv16i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    calll _test_retv16i1
 ; X32-NEXT:    # kill: %ax<def> %ax<kill> %eax<def>
 ; X32-NEXT:    incl %eax
@@ -711,7 +711,7 @@ define i16 @caller_retv16i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_retv16i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -738,7 +738,7 @@ define i16 @caller_retv16i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_retv16i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    callq test_retv16i1
@@ -758,7 +758,7 @@ entry:
 declare i8 @test_argv8i1helper(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
 define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)  {
 ; X32-LABEL: test_argv8i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $72, %esp
 ; X32-NEXT:    vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
@@ -785,7 +785,7 @@ define x86_regcallcc i8 @test_argv8i1(<8
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argv8i1:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %r11
 ; WIN64-NEXT:    .seh_pushreg 11
 ; WIN64-NEXT:    pushq %r10
@@ -817,7 +817,7 @@ define x86_regcallcc i8 @test_argv8i1(<8
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_argv8i1:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $128, %rsp
@@ -868,7 +868,7 @@ define x86_regcallcc i8 @test_argv8i1(<8
 ; Test regcall when passing arguments of v8i1 type
 define i8 @caller_argv8i1() #0 {
 ; X32-LABEL: caller_argv8i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl $1, %eax
 ; X32-NEXT:    movl $1, %ecx
 ; X32-NEXT:    movl $1, %edx
@@ -876,7 +876,7 @@ define i8 @caller_argv8i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_argv8i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -903,7 +903,7 @@ define i8 @caller_argv8i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_argv8i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    movl $1, %eax
@@ -921,12 +921,12 @@ entry:
 ; Test regcall when returning v8i1 type
 define x86_regcallcc <8 x i1> @test_retv8i1()  {
 ; X32-LABEL: test_retv8i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb $1, %al
 ; X32-NEXT:    retl
 ;
 ; CHECK64-LABEL: test_retv8i1:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movb $1, %al
 ; CHECK64-NEXT:    retq
   %a = bitcast i8 1 to <8 x i1>
@@ -936,7 +936,7 @@ define x86_regcallcc <8 x i1> @test_retv
 ; Test regcall when processing result of v8i1 type
 define <8 x i1> @caller_retv8i1() #0 {
 ; X32-LABEL: caller_retv8i1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    calll _test_retv8i1
 ; X32-NEXT:    # kill: %al<def> %al<kill> %eax<def>
 ; X32-NEXT:    kmovd %eax, %k0
@@ -946,7 +946,7 @@ define <8 x i1> @caller_retv8i1() #0 {
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: caller_retv8i1:
-; WIN64:       # BB#0: # %entry
+; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    pushq %rsi
 ; WIN64-NEXT:    .seh_pushreg 6
 ; WIN64-NEXT:    pushq %rdi
@@ -975,7 +975,7 @@ define <8 x i1> @caller_retv8i1() #0 {
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: caller_retv8i1:
-; LINUXOSX64:       # BB#0: # %entry
+; LINUXOSX64:       # %bb.0: # %entry
 ; LINUXOSX64-NEXT:    pushq %rax
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    callq test_retv8i1
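
Tests in this style are normally regenerated rather than edited by hand, so a rename like this one is applied by rerunning the check-update script over each file. A sketch of the invocation -- the flag spelling is from memory; see the script's --help for the exact options:

$ llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
    llvm/test/CodeGen/X86/avx512-regcall-Mask.ll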

Modified: llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll Mon Dec  4 09:18:51 2017
@@ -6,19 +6,19 @@
 ; Test regcall when receiving/returning i1
 define x86_regcallcc i1 @test_argReti1(i1 %a)  {
 ; X32-LABEL: test_argReti1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    incb %al
 ; X32-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argReti1:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    incb %al
 ; WIN64-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argReti1:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    incb %al
 ; LINUXOSX64-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; LINUXOSX64-NEXT:    retq
@@ -29,7 +29,7 @@ define x86_regcallcc i1 @test_argReti1(i
 ; Test regcall when passing/retrieving i1
 define x86_regcallcc i1 @test_CallargReti1(i1 %a)  {
 ; X32-LABEL: test_CallargReti1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    incb %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -39,7 +39,7 @@ define x86_regcallcc i1 @test_CallargRet
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargReti1:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -54,7 +54,7 @@ define x86_regcallcc i1 @test_CallargRet
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargReti1:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -73,19 +73,19 @@ define x86_regcallcc i1 @test_CallargRet
 ; Test regcall when receiving/returning i8
 define x86_regcallcc i8 @test_argReti8(i8 %a)  {
 ; X32-LABEL: test_argReti8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    incb %al
 ; X32-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argReti8:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    incb %al
 ; WIN64-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argReti8:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    incb %al
 ; LINUXOSX64-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
 ; LINUXOSX64-NEXT:    retq
@@ -96,7 +96,7 @@ define x86_regcallcc i8 @test_argReti8(i
 ; Test regcall when passing/retrieving i8
 define x86_regcallcc i8 @test_CallargReti8(i8 %a)  {
 ; X32-LABEL: test_CallargReti8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    incb %al
 ; X32-NEXT:    movzbl %al, %eax
@@ -106,7 +106,7 @@ define x86_regcallcc i8 @test_CallargRet
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargReti8:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -121,7 +121,7 @@ define x86_regcallcc i8 @test_CallargRet
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargReti8:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -140,19 +140,19 @@ define x86_regcallcc i8 @test_CallargRet
 ; Test regcall when receiving/returning i16
 define x86_regcallcc i16 @test_argReti16(i16 %a)  {
 ; X32-LABEL: test_argReti16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argReti16:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    incl %eax
 ; WIN64-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argReti16:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    incl %eax
 ; LINUXOSX64-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
 ; LINUXOSX64-NEXT:    retq
@@ -163,7 +163,7 @@ define x86_regcallcc i16 @test_argReti16
 ; Test regcall when passing/retrieving i16
 define x86_regcallcc i16 @test_CallargReti16(i16 %a)  {
 ; X32-LABEL: test_CallargReti16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    calll _test_argReti16
@@ -174,7 +174,7 @@ define x86_regcallcc i16 @test_CallargRe
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargReti16:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -190,7 +190,7 @@ define x86_regcallcc i16 @test_CallargRe
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargReti16:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -210,17 +210,17 @@ define x86_regcallcc i16 @test_CallargRe
 ; Test regcall when receiving/returning i32
 define x86_regcallcc i32 @test_argReti32(i32 %a)  {
 ; X32-LABEL: test_argReti32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argReti32:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    incl %eax
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argReti32:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    incl %eax
 ; LINUXOSX64-NEXT:    retq
   %add = add i32 %a, 1
@@ -230,7 +230,7 @@ define x86_regcallcc i32 @test_argReti32
 ; Test regcall when passing/retrieving i32
 define x86_regcallcc i32 @test_CallargReti32(i32 %a)  {
 ; X32-LABEL: test_CallargReti32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    calll _test_argReti32
@@ -239,7 +239,7 @@ define x86_regcallcc i32 @test_CallargRe
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargReti32:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -253,7 +253,7 @@ define x86_regcallcc i32 @test_CallargRe
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargReti32:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -271,19 +271,19 @@ define x86_regcallcc i32 @test_CallargRe
 ; Test regcall when receiving/returning i64
 define x86_regcallcc i64 @test_argReti64(i64 %a)  {
 ; X32-LABEL: test_argReti64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    addl $3, %eax
 ; X32-NEXT:    adcl $1, %ecx
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argReti64:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    movabsq $4294967299, %rcx # imm = 0x100000003
 ; WIN64-NEXT:    addq %rcx, %rax
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argReti64:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    movabsq $4294967299, %rcx # imm = 0x100000003
 ; LINUXOSX64-NEXT:    addq %rcx, %rax
 ; LINUXOSX64-NEXT:    retq
@@ -294,7 +294,7 @@ define x86_regcallcc i64 @test_argReti64
 ; Test regcall when passing/retrieving i64
 define x86_regcallcc i64 @test_CallargReti64(i64 %a)  {
 ; X32-LABEL: test_CallargReti64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    addl $1, %eax
 ; X32-NEXT:    adcl $0, %ecx
@@ -305,7 +305,7 @@ define x86_regcallcc i64 @test_CallargRe
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargReti64:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -319,7 +319,7 @@ define x86_regcallcc i64 @test_CallargRe
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargReti64:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -337,17 +337,17 @@ define x86_regcallcc i64 @test_CallargRe
 ; Test regcall when receiving/returning float
 define x86_regcallcc float @test_argRetFloat(float %a)  {
 ; X32-LABEL: test_argRetFloat:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddss __real@3f800000, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRetFloat:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vaddss __real@{{.*}}(%rip), %xmm0, %xmm0
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRetFloat:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0
 ; LINUXOSX64-NEXT:    retq
   %add = fadd float 1.0, %a
@@ -357,7 +357,7 @@ define x86_regcallcc float @test_argRetF
 ; Test regcall when passing/retrieving float
 define x86_regcallcc float @test_CallargRetFloat(float %a)  {
 ; X32-LABEL: test_CallargRetFloat:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $24, %esp
 ; X32-NEXT:    vmovups %xmm4, (%esp) # 16-byte Spill
@@ -371,7 +371,7 @@ define x86_regcallcc float @test_Callarg
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRetFloat:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    subq $16, %rsp
@@ -392,7 +392,7 @@ define x86_regcallcc float @test_Callarg
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRetFloat:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $16, %rsp
@@ -417,17 +417,17 @@ define x86_regcallcc float @test_Callarg
 ; Test regcall when receiving/returning double
 define x86_regcallcc double @test_argRetDouble(double %a)  {
 ; X32-LABEL: test_argRetDouble:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddsd __real@3ff0000000000000, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRetDouble:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vaddsd __real@{{.*}}(%rip), %xmm0, %xmm0
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRetDouble:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vaddsd {{.*}}(%rip), %xmm0, %xmm0
 ; LINUXOSX64-NEXT:    retq
   %add = fadd double %a, 1.0
@@ -437,7 +437,7 @@ define x86_regcallcc double @test_argRet
 ; Test regcall when passing/retrieving double
 define x86_regcallcc double @test_CallargRetDouble(double %a)  {
 ; X32-LABEL: test_CallargRetDouble:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $24, %esp
 ; X32-NEXT:    vmovups %xmm4, (%esp) # 16-byte Spill
@@ -451,7 +451,7 @@ define x86_regcallcc double @test_Callar
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRetDouble:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    subq $16, %rsp
@@ -472,7 +472,7 @@ define x86_regcallcc double @test_Callar
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRetDouble:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $16, %rsp
@@ -497,17 +497,17 @@ define x86_regcallcc double @test_Callar
 ; Test regcall when receiving/returning long double
 define x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %a0) nounwind {
 ; X32-LABEL: test_argRetf80:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    fadd %st(0), %st(0)
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRetf80:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    fadd %st(0), %st(0)
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRetf80:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    fadd %st(0), %st(0)
 ; LINUXOSX64-NEXT:    retq
   %r0 = fadd x86_fp80 %a0, %a0
@@ -517,7 +517,7 @@ define x86_regcallcc x86_fp80 @test_argR
 ; Test regcall when passing/retrieving long double
 define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a)  {
 ; X32-LABEL: test_CallargRetf80:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    fadd %st(0), %st(0)
 ; X32-NEXT:    calll _test_argRetf80
@@ -526,7 +526,7 @@ define x86_regcallcc x86_fp80 @test_Call
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRetf80:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -540,7 +540,7 @@ define x86_regcallcc x86_fp80 @test_Call
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRetf80:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -558,17 +558,17 @@ define x86_regcallcc x86_fp80 @test_Call
 ; Test regcall when receiving/returning pointer
 define x86_regcallcc [4 x i32]* @test_argRetPointer([4 x i32]* %a)  {
 ; X32-LABEL: test_argRetPointer:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRetPointer:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    incl %eax
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRetPointer:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    incl %eax
 ; LINUXOSX64-NEXT:    retq
   %b = ptrtoint [4 x i32]* %a to i32
@@ -580,7 +580,7 @@ define x86_regcallcc [4 x i32]* @test_ar
 ; Test regcall when passing/retrieving pointer
 define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a)  {
 ; X32-LABEL: test_CallargRetPointer:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    calll _test_argRetPointer
@@ -589,7 +589,7 @@ define x86_regcallcc [4 x i32]* @test_Ca
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRetPointer:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    .seh_endprologue
@@ -603,7 +603,7 @@ define x86_regcallcc [4 x i32]* @test_Ca
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRetPointer:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    .cfi_offset %rsp, -16
@@ -625,17 +625,17 @@ define x86_regcallcc [4 x i32]* @test_Ca
 ; Test regcall when receiving/returning 128 bit vector
 define x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i32> %a, <4 x i32> %b)  {
 ; X32-LABEL: test_argRet128Vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRet128Vector:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRet128Vector:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; LINUXOSX64-NEXT:    retq
   %d = select <4 x i1> undef , <4 x i32> %a, <4 x i32> %b
@@ -645,7 +645,7 @@ define x86_regcallcc <4 x i32> @test_arg
 ; Test regcall when passing/retrieving 128 bit vector
 define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a)  {
 ; X32-LABEL: test_CallargRet128Vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $24, %esp
 ; X32-NEXT:    vmovups %xmm4, (%esp) # 16-byte Spill
@@ -659,7 +659,7 @@ define x86_regcallcc <4 x i32> @test_Cal
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRet128Vector:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    subq $16, %rsp
@@ -680,7 +680,7 @@ define x86_regcallcc <4 x i32> @test_Cal
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRet128Vector:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $16, %rsp
@@ -704,17 +704,17 @@ define x86_regcallcc <4 x i32> @test_Cal
 ; Test regcall when receiving/returning 256 bit vector
 define x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i32> %a, <8 x i32> %b)  {
 ; X32-LABEL: test_argRet256Vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRet256Vector:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRet256Vector:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; LINUXOSX64-NEXT:    retq
   %d = select <8 x i1> undef , <8 x i32> %a, <8 x i32> %b
@@ -724,7 +724,7 @@ define x86_regcallcc <8 x i32> @test_arg
 ; Test regcall when passing/retrieving 256 bit vector
 define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a)  {
 ; X32-LABEL: test_CallargRet256Vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $56, %esp
 ; X32-NEXT:    vmovdqu %ymm0, (%esp) # 32-byte Spill
@@ -737,7 +737,7 @@ define x86_regcallcc <8 x i32> @test_Cal
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRet256Vector:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    subq $48, %rsp
@@ -756,7 +756,7 @@ define x86_regcallcc <8 x i32> @test_Cal
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRet256Vector:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $48, %rsp
@@ -778,17 +778,17 @@ define x86_regcallcc <8 x i32> @test_Cal
 ; Test regcall when receiving/returning 512 bit vector
 define x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i32> %a, <16 x i32> %b)  {
 ; X32-LABEL: test_argRet512Vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRet512Vector:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRet512Vector:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; LINUXOSX64-NEXT:    retq
   %d = select <16 x i1> undef , <16 x i32> %a, <16 x i32> %b
@@ -798,7 +798,7 @@ define x86_regcallcc <16 x i32> @test_ar
 ; Test regcall when passing/retrieving 512 bit vector
 define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a)  {
 ; X32-LABEL: test_CallargRet512Vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %esp
 ; X32-NEXT:    subl $120, %esp
 ; X32-NEXT:    vmovdqu64 %zmm0, (%esp) # 64-byte Spill
@@ -811,7 +811,7 @@ define x86_regcallcc <16 x i32> @test_Ca
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_CallargRet512Vector:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rsp
 ; WIN64-NEXT:    .seh_pushreg 4
 ; WIN64-NEXT:    subq $112, %rsp
@@ -830,7 +830,7 @@ define x86_regcallcc <16 x i32> @test_Ca
 ; WIN64-NEXT:    .seh_endproc
 ;
 ; LINUXOSX64-LABEL: test_CallargRet512Vector:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rsp
 ; LINUXOSX64-NEXT:    .cfi_def_cfa_offset 16
 ; LINUXOSX64-NEXT:    subq $112, %rsp
@@ -852,7 +852,7 @@ define x86_regcallcc <16 x i32> @test_Ca
 ; Test regcall when running multiple input parameters - callee saved xmms
 define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b, <32 x float> %c) nounwind {
 ; X32-LABEL: testf32_inp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $44, %esp
 ; X32-NEXT:    vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
 ; X32-NEXT:    vmovups %xmm6, (%esp) # 16-byte Spill
@@ -870,7 +870,7 @@ define x86_regcallcc <32 x float> @testf
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: testf32_inp:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vaddps %zmm2, %zmm0, %zmm6
 ; WIN64-NEXT:    vaddps %zmm3, %zmm1, %zmm7
 ; WIN64-NEXT:    vmulps %zmm2, %zmm0, %zmm0
@@ -882,7 +882,7 @@ define x86_regcallcc <32 x float> @testf
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: testf32_inp:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vaddps %zmm2, %zmm0, %zmm6
 ; LINUXOSX64-NEXT:    vaddps %zmm3, %zmm1, %zmm7
 ; LINUXOSX64-NEXT:    vmulps %zmm2, %zmm0, %zmm0
@@ -902,7 +902,7 @@ define x86_regcallcc <32 x float> @testf
 ; Test regcall when running multiple input parameters - callee saved GPRs
 define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %b1, i32 %b2, i32 %b3, i32 %b4, i32 %b5, i32 %b6) nounwind {
 ; X32-LABEL: testi32_inp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    subl $20, %esp
@@ -954,7 +954,7 @@ define x86_regcallcc i32 @testi32_inp(i3
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: testi32_inp:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %r13
 ; WIN64-NEXT:    pushq %rbp
 ; WIN64-NEXT:    pushq %rbx
@@ -993,7 +993,7 @@ define x86_regcallcc i32 @testi32_inp(i3
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: testi32_inp:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rbp
 ; LINUXOSX64-NEXT:    pushq %rbx
 ; LINUXOSX64-NEXT:    movl %eax, %r10d
@@ -1058,7 +1058,7 @@ define x86_regcallcc i32 @testi32_inp(i3
 ; Test that parameters, overflowing register capacity, are passed through the stack
 define x86_regcallcc <32 x float> @testf32_stack(<32 x float> %a0, <32 x float> %b0, <32 x float> %c0, <32 x float> %a1, <32 x float> %b1, <32 x float> %c1, <32 x float> %a2, <32 x float> %b2, <32 x float> %c2) nounwind {
 ; X32-LABEL: testf32_stack:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-64, %esp
@@ -1084,7 +1084,7 @@ define x86_regcallcc <32 x float> @testf
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: testf32_stack:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    pushq %rbp
 ; WIN64-NEXT:    subq $48, %rsp
 ; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
@@ -1110,7 +1110,7 @@ define x86_regcallcc <32 x float> @testf
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: testf32_stack:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    pushq %rbp
 ; LINUXOSX64-NEXT:    movq %rsp, %rbp
 ; LINUXOSX64-NEXT:    andq $-64, %rsp
@@ -1148,7 +1148,7 @@ define x86_regcallcc <32 x float> @testf
 ; Test regcall when passing/retrieving mixed types
 define x86_regcallcc i32 @test_argRetMixTypes(double, float, i8 signext, i32, i64, i16 signext, i32*) #0 {
 ; X32-LABEL: test_argRetMixTypes:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-8, %esp
@@ -1176,7 +1176,7 @@ define x86_regcallcc i32 @test_argRetMix
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argRetMixTypes:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; WIN64-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
@@ -1193,7 +1193,7 @@ define x86_regcallcc i32 @test_argRetMix
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argRetMixTypes:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; LINUXOSX64-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
@@ -1229,7 +1229,7 @@ define x86_regcallcc i32 @test_argRetMix
 
 define x86_regcallcc %struct.complex @test_argMultiRet(float, double, i32, i8, i64) local_unnamed_addr #0 {
 ; X32-LABEL: test_argMultiRet:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddsd __real@4014000000000000, %xmm1, %xmm1
 ; X32-NEXT:    movl $4, %eax
 ; X32-NEXT:    movb $7, %cl
@@ -1238,7 +1238,7 @@ define x86_regcallcc %struct.complex @te
 ; X32-NEXT:    retl
 ;
 ; WIN64-LABEL: test_argMultiRet:
-; WIN64:       # BB#0:
+; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vaddsd __real@{{.*}}(%rip), %xmm1, %xmm1
 ; WIN64-NEXT:    movl $4, %eax
 ; WIN64-NEXT:    movb $7, %cl
@@ -1246,7 +1246,7 @@ define x86_regcallcc %struct.complex @te
 ; WIN64-NEXT:    retq
 ;
 ; LINUXOSX64-LABEL: test_argMultiRet:
-; LINUXOSX64:       # BB#0:
+; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vaddsd {{.*}}(%rip), %xmm1, %xmm1
 ; LINUXOSX64-NEXT:    movl $4, %eax
 ; LINUXOSX64-NEXT:    movb $7, %cl

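The regcall hunks above are purely mechanical: every "# BB#0:" entry-block comment becomes "# %bb.0:", matching the MIR spelling of block references. Files like these are normally regenerated with llvm/utils/update_llc_test_checks.py rather than edited by hand. A minimal sketch of an affected test after the change (hypothetical function name; only the block label and the return are checked, since the exact instruction sequence varies by subtarget):

    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s

    define i32 @inc(i32 %a) {
    ; CHECK-LABEL: inc:
    ; CHECK:       # %bb.0:
    ; CHECK:         retq
      %add = add i32 %a, 1
      ret i32 %add
    }
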
Modified: llvm/trunk/test/CodeGen/X86/avx512-rotate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-rotate.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-rotate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-rotate.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
 
 define <16 x i32> @test_splat_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
 ; KNL-LABEL: test_splat_rol_v16i32:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprold $5, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprold $5, %zmm0, %zmm2 {%k1} {z}
@@ -24,7 +24,7 @@ define <16 x i32> @test_splat_rol_v16i32
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_rol_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprold $5, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprold $5, %zmm0, %zmm2 {%k1} {z}
@@ -42,7 +42,7 @@ define <16 x i32> @test_splat_rol_v16i32
 
 define <8 x i64>@test_splat_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
 ; KNL-LABEL: test_splat_rol_v8i64:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprolq $5, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprolq $5, %zmm0, %zmm2 {%k1} {z}
@@ -52,7 +52,7 @@ define <8 x i64>@test_splat_rol_v8i64(<8
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_rol_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprolq $5, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprolq $5, %zmm0, %zmm2 {%k1} {z}
@@ -70,7 +70,7 @@ define <8 x i64>@test_splat_rol_v8i64(<8
 
 define <16 x i32> @test_splat_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
 ; KNL-LABEL: test_splat_ror_v16i32:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprord $5, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprord $5, %zmm0, %zmm2 {%k1} {z}
@@ -80,7 +80,7 @@ define <16 x i32> @test_splat_ror_v16i32
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_ror_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprord $5, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprord $5, %zmm0, %zmm2 {%k1} {z}
@@ -98,7 +98,7 @@ define <16 x i32> @test_splat_ror_v16i32
 
 define <8 x i64>@test_splat_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
 ; KNL-LABEL: test_splat_ror_v8i64:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprorq $5, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprorq $5, %zmm0, %zmm2 {%k1} {z}
@@ -108,7 +108,7 @@ define <8 x i64>@test_splat_ror_v8i64(<8
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_ror_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprorq $5, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprorq $5, %zmm0, %zmm2 {%k1} {z}
@@ -128,7 +128,7 @@ define <8 x i64>@test_splat_ror_v8i64(<8
 
 define <16 x i32> @test_splat_bounds_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
 ; KNL-LABEL: test_splat_bounds_rol_v16i32:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprold $1, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprold $31, %zmm0, %zmm2 {%k1} {z}
@@ -138,7 +138,7 @@ define <16 x i32> @test_splat_bounds_rol
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_bounds_rol_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprold $1, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprold $31, %zmm0, %zmm2 {%k1} {z}
@@ -156,7 +156,7 @@ define <16 x i32> @test_splat_bounds_rol
 
 define <8 x i64>@test_splat_bounds_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
 ; KNL-LABEL: test_splat_bounds_rol_v8i64:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprolq $62, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprolq $1, %zmm0, %zmm2 {%k1} {z}
@@ -166,7 +166,7 @@ define <8 x i64>@test_splat_bounds_rol_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_bounds_rol_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprolq $62, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprolq $1, %zmm0, %zmm2 {%k1} {z}
@@ -184,7 +184,7 @@ define <8 x i64>@test_splat_bounds_rol_v
 
 define <16 x i32> @test_splat_bounds_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
 ; KNL-LABEL: test_splat_bounds_ror_v16i32:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprord $1, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprord $31, %zmm0, %zmm2 {%k1} {z}
@@ -194,7 +194,7 @@ define <16 x i32> @test_splat_bounds_ror
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_bounds_ror_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprord $1, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprord $31, %zmm0, %zmm2 {%k1} {z}
@@ -212,7 +212,7 @@ define <16 x i32> @test_splat_bounds_ror
 
 define <8 x i64>@test_splat_bounds_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
 ; KNL-LABEL: test_splat_bounds_ror_v8i64:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vprorq $62, %zmm0, %zmm1 {%k1}
 ; KNL-NEXT:    vprorq $1, %zmm0, %zmm2 {%k1} {z}
@@ -222,7 +222,7 @@ define <8 x i64>@test_splat_bounds_ror_v
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: test_splat_bounds_ror_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vprorq $62, %zmm0, %zmm1 {%k1}
 ; SKX-NEXT:    vprorq $1, %zmm0, %zmm2 {%k1} {z}
@@ -244,7 +244,7 @@ define <8 x i64>@test_splat_bounds_ror_v
 
 define <8 x i64> @test_fold_rol_v8i64() {
 ; CHECK-LABEL: test_fold_rol_v8i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps {{.*#+}} zmm0 = [1,2,4,9223372036854775808,2,4611686018427387904,9223372036854775808,9223372036854775808]
 ; CHECK-NEXT:    retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, <8 x i64> <i64 0, i64 1, i64 2, i64 63, i64 65, i64 65534, i64 65535, i64 -1>, <8 x i64> zeroinitializer, i8 -1)
@@ -253,7 +253,7 @@ define <8 x i64> @test_fold_rol_v8i64()
 
 define <16 x i32> @test_fold_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1) {
 ; CHECK-LABEL: test_fold_rol_v16i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; CHECK-NEXT:    vprolvd {{.*}}(%rip), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -264,7 +264,7 @@ define <16 x i32> @test_fold_rol_v16i32(
 
 define <8 x i64> @test_fold_ror_v8i64() {
 ; CHECK-LABEL: test_fold_ror_v8i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1]
 ; CHECK-NEXT:    vprorvq {{.*}}(%rip), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
@@ -275,7 +275,7 @@ define <8 x i64> @test_fold_ror_v8i64()
 
 define <16 x i32> @test_fold_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1) {
 ; CHECK-LABEL: test_fold_ror_v16i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; CHECK-NEXT:    vprorvd {{.*}}(%rip), %zmm0, %zmm0
 ; CHECK-NEXT:    retq

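The avx512-rotate.ll hunks above exercise the same functions on two subtargets, so each hunk updates a KNL block and a matching SKX block. A sketch of that two-RUN pattern under the new format (hypothetical function; the -mcpu values are chosen for illustration, the real test uses its own target attributes):

    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefix=KNL
    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skx | FileCheck %s --check-prefix=SKX

    define <8 x i64> @ident(<8 x i64> %x) {
    ; KNL-LABEL: ident:
    ; KNL:       # %bb.0:
    ; KNL:         retq
    ;
    ; SKX-LABEL: ident:
    ; SKX:       # %bb.0:
    ; SKX:         retq
      ret <8 x i64> %x
    }
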
Modified: llvm/trunk/test/CodeGen/X86/avx512-scalarIntrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-scalarIntrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-scalarIntrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-scalarIntrinsics.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define <4 x float> @test_rsqrt14_ss(<4 x float> %a0) {
 ; CHECK-LABEL: test_rsqrt14_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrsqrt14ss %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
     %res = call <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ;
@@ -14,7 +14,7 @@ define <4 x float> @test_rsqrt14_ss(<4 x
 
 define <4 x float> @test_rsqrt14_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
 ; CHECK-LABEL: test_rsqrt14_ss_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrsqrt14ss (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a1 = load <4 x float>, <4 x float>* %a1ptr
@@ -25,7 +25,7 @@ declare <4 x float> @llvm.x86.avx512.rsq
 
 define <4 x float> @test_rcp14_ss(<4 x float> %a0) {
 ; CHECK-LABEL: test_rcp14_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrcp14ss %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
     %res = call <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ;
@@ -34,7 +34,7 @@ define <4 x float> @test_rcp14_ss(<4 x f
 
 define <4 x float> @test_rcp14_ss_load(<4 x float> %a0, <4 x float>* %a1ptr) {
 ; CHECK-LABEL: test_rcp14_ss_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrcp14ss (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a1 = load <4 x float>, <4 x float>* %a1ptr
@@ -45,7 +45,7 @@ declare <4 x float> @llvm.x86.avx512.rcp
 
 define <2 x double> @test_rsqrt14_sd(<2 x double> %a0) {
 ; CHECK-LABEL: test_rsqrt14_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrsqrt14sd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
     %res = call <2 x double> @llvm.x86.avx512.rsqrt14.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 -1) ;
@@ -54,7 +54,7 @@ define <2 x double> @test_rsqrt14_sd(<2
 
 define <2 x double> @test_rsqrt14_sd_load(<2 x double> %a0, <2 x double>* %a1ptr) {
 ; CHECK-LABEL: test_rsqrt14_sd_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrsqrt14sd (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a1 = load <2 x double>, <2 x double>* %a1ptr
@@ -65,7 +65,7 @@ declare <2 x double> @llvm.x86.avx512.rs
 
 define <2 x double> @test_rcp14_sd(<2 x double> %a0) {
 ; CHECK-LABEL: test_rcp14_sd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrcp14sd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
     %res = call <2 x double> @llvm.x86.avx512.rcp14.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 -1) ;
@@ -75,7 +75,7 @@ define <2 x double> @test_rcp14_sd(<2 x
 
 define <2 x double> @test_rcp14_sd_load(<2 x double> %a0, <2 x double>* %a1ptr) {
 ; CHECK-LABEL: test_rcp14_sd_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vrcp14sd (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a1 = load <2 x double>, <2 x double>* %a1ptr
@@ -87,7 +87,7 @@ declare <2 x double> @llvm.x86.avx512.rc
 declare <4 x float> @llvm.x86.avx512.mask.scalef.ss(<4 x float>, <4 x float>,<4 x float>, i8, i32)
 define <4 x float>@test_int_x86_avx512_mask_scalef_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
 ; SKX-LABEL: test_int_x86_avx512_mask_scalef_ss:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vscalefss %xmm1, %xmm0, %xmm2 {%k1}
 ; SKX-NEXT:    vscalefss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -95,7 +95,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test_int_x86_avx512_mask_scalef_ss:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vscalefss %xmm1, %xmm0, %xmm2 {%k1}
 ; KNL-NEXT:    vscalefss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -109,7 +109,7 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <4 x float>@test_int_x86_avx512_mask_scalef_ss_load(<4 x float> %x0, <4 x float>* %x1ptr) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ss_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vscalefss (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x1 = load <4 x float>, <4 x float>* %x1ptr
@@ -120,7 +120,7 @@ define <4 x float>@test_int_x86_avx512_m
 declare <2 x double> @llvm.x86.avx512.mask.scalef.sd(<2 x double>, <2 x double>,<2 x double>, i8, i32)
 define <2 x double>@test_int_x86_avx512_mask_scalef_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
 ; SKX-LABEL: test_int_x86_avx512_mask_scalef_sd:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1
 ; SKX-NEXT:    vscalefsd %xmm1, %xmm0, %xmm2 {%k1}
 ; SKX-NEXT:    vscalefsd {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -128,7 +128,7 @@ define <2 x double>@test_int_x86_avx512_
 ; SKX-NEXT:    retq
 ;
 ; KNL-LABEL: test_int_x86_avx512_mask_scalef_sd:
-; KNL:       ## BB#0:
+; KNL:       ## %bb.0:
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vscalefsd %xmm1, %xmm0, %xmm2 {%k1}
 ; KNL-NEXT:    vscalefsd {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -142,7 +142,7 @@ define <2 x double>@test_int_x86_avx512_
 
 define <2 x double>@test_int_x86_avx512_mask_scalef_sd_load(<2 x double> %x0, <2 x double>* %x1ptr) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_scalef_sd_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vscalefsd (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %x1 = load <2 x double>, <2 x double>* %x1ptr

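Note the double hash in the avx512-scalarIntrinsics.ll hunks above and the avx512-scalar_mask.ll hunks below: those tests target a Darwin triple, where the X86 assembly printer uses "##" as its comment string, so the new label reads "## %bb.0:". A sketch under that assumption (hypothetical function name):

    ; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s

    define <4 x float> @passthrough(<4 x float> %v) {
    ; CHECK-LABEL: passthrough:
    ; CHECK:       ## %bb.0:
    ; CHECK:         retq
      ret <4 x float> %v
    }
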
Modified: llvm/trunk/test/CodeGen/X86/avx512-scalar_mask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-scalar_mask.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-scalar_mask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-scalar_mask.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
 ; CHECK-LABEL: test_var_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
@@ -16,7 +16,7 @@ define <4 x float>@test_var_mask(<4 x fl
 
 define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
 ; CHECK-LABEL: test_var_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    retq
@@ -27,7 +27,7 @@ define <4 x float>@test_var_maskz(<4 x f
 ; FIXME: we should just return %xmm0 here.
 define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const0_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    kmovw %eax, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
@@ -39,7 +39,7 @@ define <4 x float>@test_const0_mask(<4 x
 ; FIXME: we should zero the lower element of xmm0 and return it.
 define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const0_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    kmovw %eax, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
@@ -51,7 +51,7 @@ define <4 x float>@test_const0_maskz(<4
 ; FIXME: we should just return %xmm0 here.
 define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const2_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movb $2, %al
 ; CHECK-NEXT:    kmovw %eax, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
@@ -63,7 +63,7 @@ define <4 x float>@test_const2_mask(<4 x
 ; FIXME: we should zero the lower element of xmm0 and return it.
 define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const2_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movb $2, %al
 ; CHECK-NEXT:    kmovw %eax, %k1
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
@@ -74,7 +74,7 @@ define <4 x float>@test_const2_maskz(<4
 
 define <4 x float>@test_const_allone_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const_allone_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2,  i8 -1, i32 4)
@@ -83,7 +83,7 @@ define <4 x float>@test_const_allone_mas
 
 define <4 x float>@test_const_allone_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const_allone_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2,  i8 -1, i32 4)
@@ -92,7 +92,7 @@ define <4 x float>@test_const_allone_mas
 
 define <4 x float>@test_const_3_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const_3_mask:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2,  i8 3, i32 4)
@@ -101,7 +101,7 @@ define <4 x float>@test_const_3_mask(<4
 
 define <4 x float>@test_const_3_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: test_const_3_maskz:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2,  i8 3, i32 4)

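The avx512-schedule.ll hunks below show one more wrinkle: when the IR entry block has an explicit name, that name is still printed after the block number, giving "# %bb.0: # %entry". A sketch (hypothetical function name):

    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s

    define double @addone(double %a) {
    ; CHECK-LABEL: addone:
    ; CHECK:       # %bb.0: # %entry
    ; CHECK:         retq
    entry:
      %add = fadd double %a, 1.0
      ret double %add
    }
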
Modified: llvm/trunk/test/CodeGen/X86/avx512-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-schedule.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
 ; GENERIC-LABEL: addpd512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vaddpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: addpd512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vaddpd %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -21,12 +21,12 @@ entry:
 
 define <8 x double> @addpd512fold(<8 x double> %y) {
 ; GENERIC-LABEL: addpd512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vaddpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: addpd512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vaddpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -36,12 +36,12 @@ entry:
 
 define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
 ; GENERIC-LABEL: addps512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vaddps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: addps512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vaddps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -51,12 +51,12 @@ entry:
 
 define <16 x float> @addps512fold(<16 x float> %y) {
 ; GENERIC-LABEL: addps512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vaddps {{.*}}(%rip), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: addps512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vaddps {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -66,12 +66,12 @@ entry:
 
 define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
 ; GENERIC-LABEL: subpd512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vsubpd %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: subpd512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vsubpd %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -81,12 +81,12 @@ entry:
 
 define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
 ; GENERIC-LABEL: subpd512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vsubpd (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: subpd512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vsubpd (%rdi), %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -97,12 +97,12 @@ entry:
 
 define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) {
 ; GENERIC-LABEL: subps512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vsubps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: subps512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vsubps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -112,12 +112,12 @@ entry:
 
 define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
 ; GENERIC-LABEL: subps512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vsubps (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: subps512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vsubps (%rdi), %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -128,12 +128,12 @@ entry:
 
 define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
 ; GENERIC-LABEL: imulq512:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmullq %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: imulq512:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullq %zmm0, %zmm1, %zmm0 # sched: [12:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %z = mul <8 x i64>%x, %y
@@ -142,12 +142,12 @@ define <8 x i64> @imulq512(<8 x i64> %y,
 
 define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
 ; GENERIC-LABEL: imulq256:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmullq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: imulq256:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullq %ymm0, %ymm1, %ymm0 # sched: [12:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %z = mul <4 x i64>%x, %y
@@ -156,12 +156,12 @@ define <4 x i64> @imulq256(<4 x i64> %y,
 
 define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
 ; GENERIC-LABEL: imulq128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmullq %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: imulq128:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullq %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %z = mul <2 x i64>%x, %y
@@ -170,12 +170,12 @@ define <2 x i64> @imulq128(<2 x i64> %y,
 
 define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
 ; GENERIC-LABEL: mulpd512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vmulpd %zmm0, %zmm1, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mulpd512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmulpd %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -185,12 +185,12 @@ entry:
 
 define <8 x double> @mulpd512fold(<8 x double> %y) {
 ; GENERIC-LABEL: mulpd512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vmulpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mulpd512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmulpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -200,12 +200,12 @@ entry:
 
 define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) {
 ; GENERIC-LABEL: mulps512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vmulps %zmm0, %zmm1, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mulps512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmulps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -215,12 +215,12 @@ entry:
 
 define <16 x float> @mulps512fold(<16 x float> %y) {
 ; GENERIC-LABEL: mulps512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vmulps {{.*}}(%rip), %zmm0, %zmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mulps512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -230,12 +230,12 @@ entry:
 
 define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) {
 ; GENERIC-LABEL: divpd512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vdivpd %zmm0, %zmm1, %zmm0 # sched: [24:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: divpd512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vdivpd %zmm0, %zmm1, %zmm0 # sched: [23:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -245,12 +245,12 @@ entry:
 
 define <8 x double> @divpd512fold(<8 x double> %y) {
 ; GENERIC-LABEL: divpd512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vdivpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [28:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: divpd512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vdivpd {{.*}}(%rip), %zmm0, %zmm0 # sched: [30:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -260,12 +260,12 @@ entry:
 
 define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) {
 ; GENERIC-LABEL: divps512:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vdivps %zmm0, %zmm1, %zmm0 # sched: [24:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: divps512:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vdivps %zmm0, %zmm1, %zmm0 # sched: [23:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -275,12 +275,12 @@ entry:
 
 define <16 x float> @divps512fold(<16 x float> %y) {
 ; GENERIC-LABEL: divps512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vdivps {{.*}}(%rip), %zmm0, %zmm0 # sched: [28:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: divps512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vdivps {{.*}}(%rip), %zmm0, %zmm0 # sched: [24:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -290,12 +290,12 @@ entry:
 
 define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
 ; GENERIC-LABEL: vpaddq_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddq %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddq_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = add <8 x i64> %i, %j
@@ -304,12 +304,12 @@ define <8 x i64> @vpaddq_test(<8 x i64>
 
 define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
 ; GENERIC-LABEL: vpaddq_fold_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddq (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddq_fold_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq (%rdi), %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %tmp = load <8 x i64>, <8 x i64>* %j, align 4
@@ -319,12 +319,12 @@ define <8 x i64> @vpaddq_fold_test(<8 x
 
 define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
 ; GENERIC-LABEL: vpaddq_broadcast_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddq_broadcast_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -333,12 +333,12 @@ define <8 x i64> @vpaddq_broadcast_test(
 
 define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
 ; GENERIC-LABEL: vpaddq_broadcast2_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddq (%rdi){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddq_broadcast2_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq (%rdi){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %tmp = load i64, i64* %j
@@ -356,12 +356,12 @@ define <8 x i64> @vpaddq_broadcast2_test
 
 define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = add <16 x i32> %i, %j
@@ -370,12 +370,12 @@ define <16 x i32> @vpaddd_test(<16 x i32
 
 define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
 ; GENERIC-LABEL: vpaddd_fold_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_fold_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %tmp = load <16 x i32>, <16 x i32>* %j, align 4
@@ -385,12 +385,12 @@ define <16 x i32> @vpaddd_fold_test(<16
 
 define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
 ; GENERIC-LABEL: vpaddd_broadcast_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_broadcast_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -399,14 +399,14 @@ define <16 x i32> @vpaddd_broadcast_test
 
 define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_mask_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
 ; GENERIC-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_mask_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} # sched: [1:0.33]
@@ -419,14 +419,14 @@ define <16 x i32> @vpaddd_mask_test(<16
 
 define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_maskz_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
 ; GENERIC-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_maskz_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
@@ -439,14 +439,14 @@ define <16 x i32> @vpaddd_maskz_test(<16
 
 define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_mask_fold_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_mask_fold_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} # sched: [8:0.50]
@@ -460,14 +460,14 @@ define <16 x i32> @vpaddd_mask_fold_test
 
 define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_mask_broadcast_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_mask_broadcast_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} # sched: [8:0.50]
@@ -480,14 +480,14 @@ define <16 x i32> @vpaddd_mask_broadcast
 
 define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_maskz_fold_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_maskz_fold_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -501,14 +501,14 @@ define <16 x i32> @vpaddd_maskz_fold_tes
 
 define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: vpaddd_maskz_broadcast_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpaddd_maskz_broadcast_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -521,12 +521,12 @@ define <16 x i32> @vpaddd_maskz_broadcas
 
 define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
 ; GENERIC-LABEL: vpsubq_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsubq %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpsubq_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = sub <8 x i64> %i, %j
@@ -535,12 +535,12 @@ define <8 x i64> @vpsubq_test(<8 x i64>
 
 define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
 ; GENERIC-LABEL: vpsubd_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsubd %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpsubd_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubd %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = sub <16 x i32> %i, %j
@@ -549,12 +549,12 @@ define <16 x i32> @vpsubd_test(<16 x i32
 
 define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
 ; GENERIC-LABEL: vpmulld_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmulld %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpmulld_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmulld %zmm1, %zmm0, %zmm0 # sched: [8:0.67]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = mul <16 x i32> %i, %j
@@ -564,12 +564,12 @@ define <16 x i32> @vpmulld_test(<16 x i3
 declare float @sqrtf(float) readnone
 define float @sqrtA(float %a) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: sqrtA:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sqrtA:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -580,12 +580,12 @@ entry:
 declare double @sqrt(double) readnone
 define double @sqrtB(double %a) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: sqrtB:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [21:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sqrtB:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [18:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -596,12 +596,12 @@ entry:
 declare float @llvm.sqrt.f32(float)
 define float @sqrtC(float %a) nounwind {
 ; GENERIC-LABEL: sqrtC:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [114:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sqrtC:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = call float @llvm.sqrt.f32(float %a)
@@ -611,12 +611,12 @@ define float @sqrtC(float %a) nounwind {
 declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
 define <16 x float> @sqrtD(<16 x float> %a) nounwind {
 ; GENERIC-LABEL: sqrtD:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsqrtps %zmm0, %zmm0 # sched: [14:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sqrtD:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtps %zmm0, %zmm0 # sched: [19:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
@@ -626,12 +626,12 @@ define <16 x float> @sqrtD(<16 x float>
 declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
 define <8 x double> @sqrtE(<8 x double> %a) nounwind {
 ; GENERIC-LABEL: sqrtE:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsqrtpd %zmm0, %zmm0 # sched: [14:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sqrtE:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtpd %zmm0, %zmm0 # sched: [31:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
@@ -640,12 +640,12 @@ define <8 x double> @sqrtE(<8 x double>
 
 define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
 ; GENERIC-LABEL: fadd_broadcast:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fadd_broadcast:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fadd <16 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -654,12 +654,12 @@ define <16 x float> @fadd_broadcast(<16
 
 define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
 ; GENERIC-LABEL: addq_broadcast:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: addq_broadcast:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = add <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -668,12 +668,12 @@ define <8 x i64> @addq_broadcast(<8 x i6
 
 define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
 ; GENERIC-LABEL: orq_broadcast:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: orq_broadcast:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -682,12 +682,12 @@ define <8 x i64> @orq_broadcast(<8 x i64
 
 define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
 ; GENERIC-LABEL: andd512fold:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vandps (%rdi), %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: andd512fold:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vandps (%rdi), %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -698,12 +698,12 @@ entry:
 
 define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
 ; GENERIC-LABEL: andqbrst:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vandpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: andqbrst:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vandpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -716,14 +716,14 @@ entry:
 
 define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
 ; GENERIC-LABEL: test_mask_vaddps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vaddps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vaddps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -738,14 +738,14 @@ define <16 x float> @test_mask_vaddps(<1
 
 define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vmulps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vmulps %zmm2, %zmm1, %zmm0 {%k1} # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vmulps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmulps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -758,14 +758,14 @@ define <16 x float> @test_mask_vmulps(<1
 
 define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vminps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vminps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vminps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vminps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -779,14 +779,14 @@ define <16 x float> @test_mask_vminps(<1
 
 define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vminpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; GENERIC-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vminpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vminpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -800,14 +800,14 @@ define <8 x double> @test_mask_vminpd(<8
 
 define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vmaxps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vmaxps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vmaxps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmaxps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -821,14 +821,14 @@ define <16 x float> @test_mask_vmaxps(<1
 
 define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vmaxpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1
 ; GENERIC-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vmaxpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmaxpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -842,14 +842,14 @@ define <8 x double> @test_mask_vmaxpd(<8
 
 define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vsubps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vsubps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vsubps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vsubps %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -862,14 +862,14 @@ define <16 x float> @test_mask_vsubps(<1
 
 define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i, <16 x float> %j, <16 x i32> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vdivps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vdivps %zmm2, %zmm1, %zmm0 {%k1} # sched: [24:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vdivps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vdivps %zmm2, %zmm1, %zmm0 {%k1} # sched: [23:2.00]
@@ -882,14 +882,14 @@ define <16 x float> @test_mask_vdivps(<1
 
 define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i, <8 x double> %j, <8 x i64> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_mask_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm4, %zmm3, %k1
 ; GENERIC-NEXT:    vaddpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm4, %xmm4, %xmm4 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm4, %zmm3, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [4:0.33]
@@ -902,14 +902,14 @@ define <8 x double> @test_mask_vaddpd(<8
 
 define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j, <8 x i64> %mask1) nounwind readnone {
 ; GENERIC-LABEL: test_maskz_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm3, %zmm2, %k1
 ; GENERIC-NEXT:    vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_maskz_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [4:0.33]
@@ -922,14 +922,14 @@ define <8 x double> @test_maskz_vaddpd(<
 
 define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i, <8 x double>* %j,  <8 x i64> %mask1) nounwind {
 ; GENERIC-LABEL: test_mask_fold_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm3, %zmm2, %k1
 ; GENERIC-NEXT:    vaddpd (%rdi), %zmm1, %zmm0 {%k1} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_fold_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm3, %zmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddpd (%rdi), %zmm1, %zmm0 {%k1} # sched: [11:0.50]
@@ -943,14 +943,14 @@ define <8 x double> @test_mask_fold_vadd
 
 define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j, <8 x i64> %mask1) nounwind {
 ; GENERIC-LABEL: test_maskz_fold_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_maskz_fold_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z} # sched: [11:0.50]
@@ -964,12 +964,12 @@ define <8 x double> @test_maskz_fold_vad
 
 define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
 ; GENERIC-LABEL: test_broadcast_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_broadcast_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddpd (%rdi){1to8}, %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %tmp = load double, double* %j
@@ -982,7 +982,7 @@ define <8 x double> @test_broadcast_vadd
 
 define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i, double* %j, <8 x i64> %mask1) nounwind {
 ; GENERIC-LABEL: test_mask_broadcast_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm0, %zmm2, %k1
 ; GENERIC-NEXT:    vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1} # sched: [7:1.00]
@@ -990,7 +990,7 @@ define <8 x double> @test_mask_broadcast
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mask_broadcast_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm0, %zmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1} # sched: [11:0.50]
@@ -1008,14 +1008,14 @@ define <8 x double> @test_mask_broadcast
 
 define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
 ; GENERIC-LABEL: test_maskz_broadcast_vaddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_maskz_broadcast_vaddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} # sched: [11:0.50]
@@ -1033,12 +1033,12 @@ define <8 x double> @test_maskz_broadcas
 
 define <16 x float>  @test_fxor(<16 x float> %a) {
 ; GENERIC-LABEL: test_fxor:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_fxor:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 
@@ -1048,12 +1048,12 @@ define <16 x float>  @test_fxor(<16 x fl
 
 define <8 x float>  @test_fxor_8f32(<8 x float> %a) {
 ; GENERIC-LABEL: test_fxor_8f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_fxor_8f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -1062,12 +1062,12 @@ define <8 x float>  @test_fxor_8f32(<8 x
 
 define <8 x double> @fabs_v8f64(<8 x double> %p)
 ; GENERIC-LABEL: fabs_v8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fabs_v8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 {
@@ -1078,12 +1078,12 @@ declare <8 x double> @llvm.fabs.v8f64(<8
 
 define <16 x float> @fabs_v16f32(<16 x float> %p)
 ; GENERIC-LABEL: fabs_v16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fabs_v16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 {
@@ -1094,7 +1094,7 @@ declare <16 x float> @llvm.fabs.v16f32(<
 
 define double @test1(double %a, double %b) nounwind {
 ; GENERIC-LABEL: test1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vucomisd %xmm1, %xmm0 # sched: [2:1.00]
 ; GENERIC-NEXT:    jne .LBB64_1 # sched: [1:1.00]
 ; GENERIC-NEXT:    jnp .LBB64_2 # sched: [1:1.00]
@@ -1106,7 +1106,7 @@ define double @test1(double %a, double %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vucomisd %xmm1, %xmm0 # sched: [2:1.00]
 ; SKX-NEXT:    jne .LBB64_1 # sched: [1:0.50]
 ; SKX-NEXT:    jnp .LBB64_2 # sched: [1:0.50]
@@ -1129,10 +1129,10 @@ l2:
 
 define float @test2(float %a, float %b) nounwind {
 ; GENERIC-LABEL: test2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vucomiss %xmm0, %xmm1 # sched: [2:1.00]
 ; GENERIC-NEXT:    jbe .LBB65_2 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#1: # %l1
+; GENERIC-NEXT:  # %bb.1: # %l1
 ; GENERIC-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ; GENERIC-NEXT:  .LBB65_2: # %l2
@@ -1140,10 +1140,10 @@ define float @test2(float %a, float %b)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vucomiss %xmm0, %xmm1 # sched: [2:1.00]
 ; SKX-NEXT:    jbe .LBB65_2 # sched: [1:0.50]
-; SKX-NEXT:  # BB#1: # %l1
+; SKX-NEXT:  # %bb.1: # %l1
 ; SKX-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; SKX-NEXT:  .LBB65_2: # %l2
@@ -1162,14 +1162,14 @@ l2:
 
 define i32 @test3(float %a, float %b) {
 ; GENERIC-LABEL: test3:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
 ; GENERIC-NEXT:    kmovd %k0, %eax
 ; GENERIC-NEXT:    movzbl %al, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test3:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
 ; SKX-NEXT:    movzbl %al, %eax # sched: [1:0.25]
@@ -1182,12 +1182,12 @@ define i32 @test3(float %a, float %b) {
 
 define float @test5(float %p) #0 {
 ; GENERIC-LABEL: test5:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vucomiss %xmm1, %xmm0 # sched: [2:1.00]
 ; GENERIC-NEXT:    jne .LBB67_1 # sched: [1:1.00]
 ; GENERIC-NEXT:    jp .LBB67_1 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#2: # %return
+; GENERIC-NEXT:  # %bb.2: # %return
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ; GENERIC-NEXT:  .LBB67_1: # %if.end
 ; GENERIC-NEXT:    seta %al # sched: [2:1.00]
@@ -1196,12 +1196,12 @@ define float @test5(float %p) #0 {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test5:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 # sched: [2:1.00]
 ; SKX-NEXT:    jne .LBB67_1 # sched: [1:0.50]
 ; SKX-NEXT:    jp .LBB67_1 # sched: [1:0.50]
-; SKX-NEXT:  # BB#2: # %return
+; SKX-NEXT:  # %bb.2: # %return
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; SKX-NEXT:  .LBB67_1: # %if.end
 ; SKX-NEXT:    seta %al # sched: [2:1.00]
@@ -1224,14 +1224,14 @@ return:
 
 define i32 @test6(i32 %a, i32 %b) {
 ; GENERIC-LABEL: test6:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    cmpl %esi, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    sete %al # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test6:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKX-NEXT:    cmpl %esi, %edi # sched: [1:0.25]
 ; SKX-NEXT:    sete %al # sched: [1:0.50]
@@ -1243,14 +1243,14 @@ define i32 @test6(i32 %a, i32 %b) {
 
 define i32 @test7(double %x, double %y) #2 {
 ; GENERIC-LABEL: test7:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    vucomisd %xmm1, %xmm0 # sched: [2:1.00]
 ; GENERIC-NEXT:    setne %al # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test7:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKX-NEXT:    vucomisd %xmm1, %xmm0 # sched: [2:1.00]
 ; SKX-NEXT:    setne %al # sched: [1:0.50]
@@ -1263,7 +1263,7 @@ entry:
 
 define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
 ; GENERIC-LABEL: test8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorl $-2147483648, %esi # imm = 0x80000000
 ; GENERIC-NEXT:    # sched: [1:0.33]
 ; GENERIC-NEXT:    testl %edx, %edx # sched: [1:0.33]
@@ -1275,7 +1275,7 @@ define i32 @test8(i32 %a1, i32 %a2, i32
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    notl %edi # sched: [1:0.25]
 ; SKX-NEXT:    xorl $-2147483648, %esi # imm = 0x80000000
 ; SKX-NEXT:    # sched: [1:0.25]
@@ -1296,10 +1296,10 @@ define i32 @test8(i32 %a1, i32 %a2, i32
 
 define i32 @test9(i64 %a) {
 ; GENERIC-LABEL: test9:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    testb $1, %dil # sched: [1:0.33]
 ; GENERIC-NEXT:    jne .LBB71_2 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#1: # %A
+; GENERIC-NEXT:  # %bb.1: # %A
 ; GENERIC-NEXT:    movl $6, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ; GENERIC-NEXT:  .LBB71_2: # %B
@@ -1307,10 +1307,10 @@ define i32 @test9(i64 %a) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test9:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    testb $1, %dil # sched: [1:0.25]
 ; SKX-NEXT:    jne .LBB71_2 # sched: [1:0.50]
-; SKX-NEXT:  # BB#1: # %A
+; SKX-NEXT:  # %bb.1: # %A
 ; SKX-NEXT:    movl $6, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; SKX-NEXT:  .LBB71_2: # %B
@@ -1327,7 +1327,7 @@ B:
 
 define i32 @test10(i64 %b, i64 %c, i1 %d) {
 ; GENERIC-LABEL: test10:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movl %edx, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    andb $1, %al # sched: [1:0.33]
 ; GENERIC-NEXT:    cmpq %rsi, %rdi # sched: [1:0.33]
@@ -1336,7 +1336,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d
 ; GENERIC-NEXT:    andb $1, %cl # sched: [1:0.33]
 ; GENERIC-NEXT:    cmpb %cl, %al # sched: [1:0.33]
 ; GENERIC-NEXT:    je .LBB72_1 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#2: # %if.end.i
+; GENERIC-NEXT:  # %bb.2: # %if.end.i
 ; GENERIC-NEXT:    movl $6, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ; GENERIC-NEXT:  .LBB72_1: # %if.then.i
@@ -1344,7 +1344,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test10:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl %edx, %eax # sched: [1:0.25]
 ; SKX-NEXT:    andb $1, %al # sched: [1:0.25]
 ; SKX-NEXT:    cmpq %rsi, %rdi # sched: [1:0.25]
@@ -1353,7 +1353,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d
 ; SKX-NEXT:    andb $1, %cl # sched: [1:0.25]
 ; SKX-NEXT:    cmpb %cl, %al # sched: [1:0.25]
 ; SKX-NEXT:    je .LBB72_1 # sched: [1:0.50]
-; SKX-NEXT:  # BB#2: # %if.end.i
+; SKX-NEXT:  # %bb.2: # %if.end.i
 ; SKX-NEXT:    movl $6, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; SKX-NEXT:  .LBB72_1: # %if.then.i
@@ -1374,12 +1374,12 @@ if.end.i:
 
 define <16 x float> @sitof32(<16 x i32> %a) nounwind {
 ; GENERIC-LABEL: sitof32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sitof32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <16 x i32> %a to <16 x float>
@@ -1388,12 +1388,12 @@ define <16 x float> @sitof32(<16 x i32>
 
 define <8 x double> @sltof864(<8 x i64> %a) {
 ; GENERIC-LABEL: sltof864:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2pd %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sltof864:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2pd %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <8 x i64> %a to <8 x double>
@@ -1402,12 +1402,12 @@ define <8 x double> @sltof864(<8 x i64>
 
 define <4 x double> @slto4f64(<4 x i64> %a) {
 ; GENERIC-LABEL: slto4f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2pd %ymm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto4f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2pd %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <4 x i64> %a to <4 x double>
@@ -1416,12 +1416,12 @@ define <4 x double> @slto4f64(<4 x i64>
 
 define <2 x double> @slto2f64(<2 x i64> %a) {
 ; GENERIC-LABEL: slto2f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2pd %xmm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto2f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2pd %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <2 x i64> %a to <2 x double>
@@ -1430,12 +1430,12 @@ define <2 x double> @slto2f64(<2 x i64>
 
 define <2 x float> @sltof2f32(<2 x i64> %a) {
 ; GENERIC-LABEL: sltof2f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2ps %xmm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sltof2f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2ps %xmm0, %xmm0 # sched: [5:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <2 x i64> %a to <2 x float>
@@ -1444,12 +1444,12 @@ define <2 x float> @sltof2f32(<2 x i64>
 
 define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
 ; GENERIC-LABEL: slto4f32_mem:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2psy (%rdi), %xmm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto4f32_mem:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2psy (%rdi), %xmm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a1 = load <4 x i64>, <4 x i64>* %a, align 8
@@ -1459,12 +1459,12 @@ define <4 x float> @slto4f32_mem(<4 x i6
 
 define <4 x i64> @f64to4sl(<4 x double> %a) {
 ; GENERIC-LABEL: f64to4sl:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2qq %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to4sl:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2qq %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptosi <4 x double> %a to <4 x i64>
@@ -1473,12 +1473,12 @@ define <4 x i64> @f64to4sl(<4 x double>
 
 define <4 x i64> @f32to4sl(<4 x float> %a) {
 ; GENERIC-LABEL: f32to4sl:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2qq %xmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to4sl:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2qq %xmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptosi <4 x float> %a to <4 x i64>
@@ -1487,13 +1487,13 @@ define <4 x i64> @f32to4sl(<4 x float> %
 
 define <4 x float> @slto4f32(<4 x i64> %a) {
 ; GENERIC-LABEL: slto4f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2ps %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto4f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2ps %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1503,13 +1503,13 @@ define <4 x float> @slto4f32(<4 x i64> %
 
 define <4 x float> @ulto4f32(<4 x i64> %a) {
 ; GENERIC-LABEL: ulto4f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtuqq2ps %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ulto4f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtuqq2ps %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1519,12 +1519,12 @@ define <4 x float> @ulto4f32(<4 x i64> %
 
 define <8 x double> @ulto8f64(<8 x i64> %a) {
 ; GENERIC-LABEL: ulto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtuqq2pd %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ulto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtuqq2pd %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <8 x i64> %a to <8 x double>
@@ -1533,13 +1533,13 @@ define <8 x double> @ulto8f64(<8 x i64>
 
 define <16 x double> @ulto16f64(<16 x i64> %a) {
 ; GENERIC-LABEL: ulto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtuqq2pd %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtuqq2pd %zmm1, %zmm1 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ulto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtuqq2pd %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vcvtuqq2pd %zmm1, %zmm1 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1549,12 +1549,12 @@ define <16 x double> @ulto16f64(<16 x i6
 
 define <16 x i32> @f64to16si(<16 x float> %a) nounwind {
 ; GENERIC-LABEL: f64to16si:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2dq %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to16si:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2dq %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptosi <16 x float> %a to <16 x i32>
@@ -1563,12 +1563,12 @@ define <16 x i32> @f64to16si(<16 x float
 
 define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
 ; GENERIC-LABEL: f32to16ui:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2udq %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to16ui:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2udq %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptoui <16 x float> %a to <16 x i32>
@@ -1577,14 +1577,14 @@ define <16 x i32> @f32to16ui(<16 x float
 
 define <16 x i8> @f32to16uc(<16 x float> %f) {
 ; GENERIC-LABEL: f32to16uc:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2dq %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovdb %zmm0, %xmm0
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to16uc:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2dq %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vpmovdb %zmm0, %xmm0 # sched: [4:2.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -1595,13 +1595,13 @@ define <16 x i8> @f32to16uc(<16 x float>
 
 define <16 x i16> @f32to16us(<16 x float> %f) {
 ; GENERIC-LABEL: f32to16us:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2dq %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovdw %zmm0, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to16us:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2dq %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vpmovdw %zmm0, %ymm0 # sched: [4:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1611,12 +1611,12 @@ define <16 x i16> @f32to16us(<16 x float
 
 define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
 ; GENERIC-LABEL: f32to8ui:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2udq %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to8ui:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2udq %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptoui <8 x float> %a to <8 x i32>
@@ -1625,12 +1625,12 @@ define <8 x i32> @f32to8ui(<8 x float> %
 
 define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
 ; GENERIC-LABEL: f32to4ui:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2udq %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to4ui:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2udq %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptoui <4 x float> %a to <4 x i32>
@@ -1639,12 +1639,12 @@ define <4 x i32> @f32to4ui(<4 x float> %
 
 define <8 x i32> @f64to8ui(<8 x double> %a) nounwind {
 ; GENERIC-LABEL: f64to8ui:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2udq %zmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to8ui:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2udq %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptoui <8 x double> %a to <8 x i32>
@@ -1653,14 +1653,14 @@ define <8 x i32> @f64to8ui(<8 x double>
 
 define <8 x i16> @f64to8us(<8 x double> %f) {
 ; GENERIC-LABEL: f64to8us:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2dq %zmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovdw %ymm0, %xmm0
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to8us:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovdw %ymm0, %xmm0 # sched: [4:2.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -1671,14 +1671,14 @@ define <8 x i16> @f64to8us(<8 x double>
 
 define <8 x i8> @f64to8uc(<8 x double> %f) {
 ; GENERIC-LABEL: f64to8uc:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2dq %zmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovdw %ymm0, %xmm0
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to8uc:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovdw %ymm0, %xmm0 # sched: [4:2.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -1689,13 +1689,13 @@ define <8 x i8> @f64to8uc(<8 x double> %
 
 define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
 ; GENERIC-LABEL: f64to4ui:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2udq %ymm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to4ui:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2udq %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1705,12 +1705,12 @@ define <4 x i32> @f64to4ui(<4 x double>
 
 define <8 x double> @sito8f64(<8 x i32> %a) {
 ; GENERIC-LABEL: sito8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sito8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <8 x i32> %a to <8 x double>
@@ -1718,18 +1718,18 @@ define <8 x double> @sito8f64(<8 x i32>
 }
 define <8 x double> @i32to8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
 ; GENERIC-LABEL: i32to8f64_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1} # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: i32to8f64_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1} # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; VLNOBW-LABEL: i32to8f64_mask:
-; VLNOBW:       # BB#0:
+; VLNOBW:       # %bb.0:
 ; VLNOBW-NEXT:    kmovw %edi, %k1
 ; VLNOBW-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
 ; VLNOBW-NEXT:    ret{{[l|q]}}
@@ -1740,18 +1740,18 @@ define <8 x double> @i32to8f64_mask(<8 x
 }
 define <8 x double> @sito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
 ; GENERIC-LABEL: sito8f64_maskz:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z} # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sito8f64_maskz:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z} # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; VLNOBW-LABEL: sito8f64_maskz:
-; VLNOBW:       # BB#0:
+; VLNOBW:       # %bb.0:
 ; VLNOBW-NEXT:    kmovw %edi, %k1
 ; VLNOBW-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
 ; VLNOBW-NEXT:    ret{{[l|q]}}
@@ -1763,12 +1763,12 @@ define <8 x double> @sito8f64_maskz(<8 x
 
 define <8 x i32> @f64to8si(<8 x double> %a) {
 ; GENERIC-LABEL: f64to8si:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2dq %zmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to8si:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptosi <8 x double> %a to <8 x i32>
@@ -1777,13 +1777,13 @@ define <8 x i32> @f64to8si(<8 x double>
 
 define <4 x i32> @f64to4si(<4 x double> %a) {
 ; GENERIC-LABEL: f64to4si:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to4si:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1793,14 +1793,14 @@ define <4 x i32> @f64to4si(<4 x double>
 
 define <16 x float> @f64to16f32(<16 x double> %b) nounwind {
 ; GENERIC-LABEL: f64to16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtpd2ps %zmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvtpd2ps %zmm1, %ymm1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtpd2ps %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvtpd2ps %zmm1, %ymm1 # sched: [7:1.00]
 ; SKX-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [3:1.00]
@@ -1811,13 +1811,13 @@ define <16 x float> @f64to16f32(<16 x do
 
 define <4 x float> @f64to4f32(<4 x double> %b) {
 ; GENERIC-LABEL: f64to4f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to4f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1827,7 +1827,7 @@ define <4 x float> @f64to4f32(<4 x doubl
 
 define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
 ; GENERIC-LABEL: f64to4f32_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm1, %xmm1, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtpd2ps %ymm0, %xmm0 {%k1} {z} # sched: [3:1.00]
@@ -1835,7 +1835,7 @@ define <4 x float> @f64to4f32_mask(<4 x
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64to4f32_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtpd2ps %ymm0, %xmm0 {%k1} {z} # sched: [7:1.00]
@@ -1848,12 +1848,12 @@ define <4 x float> @f64to4f32_mask(<4 x
 
 define <4 x float> @f64tof32_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
 ; GENERIC-LABEL: f64tof32_inreg:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtsd2ss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64tof32_inreg:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtsd2ss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %ext = extractelement <2 x double> %a0, i32 0
@@ -1864,12 +1864,12 @@ define <4 x float> @f64tof32_inreg(<2 x
 
 define <8 x double> @f32to8f64(<8 x float> %b) nounwind {
 ; GENERIC-LABEL: f32to8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtps2pd %ymm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtps2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a = fpext <8 x float> %b to <8 x double>
@@ -1878,13 +1878,13 @@ define <8 x double> @f32to8f64(<8 x floa
 
 define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) {
 ; GENERIC-LABEL: f32to4f64_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcmpltpd %ymm2, %ymm1, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvtps2pd %xmm0, %ymm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32to4f64_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcmpltpd %ymm2, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtps2pd %xmm0, %ymm0 {%k1} {z} # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1896,12 +1896,12 @@ define <4 x double> @f32to4f64_mask(<4 x
 
 define <2 x double> @f32tof64_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
 ; GENERIC-LABEL: f32tof64_inreg:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32tof64_inreg:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %ext = extractelement <4 x float> %a1, i32 0
@@ -1912,12 +1912,12 @@ define <2 x double> @f32tof64_inreg(<2 x
 
 define double @sltof64_load(i64* nocapture %e) {
 ; GENERIC-LABEL: sltof64_load:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sltof64_load:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -1928,12 +1928,12 @@ entry:
 
 define double @sitof64_load(i32* %e) {
 ; GENERIC-LABEL: sitof64_load:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vcvtsi2sdl (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sitof64_load:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vcvtsi2sdl (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -1944,12 +1944,12 @@ entry:
 
 define float @sitof32_load(i32* %e) {
 ; GENERIC-LABEL: sitof32_load:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vcvtsi2ssl (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sitof32_load:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vcvtsi2ssl (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -1960,12 +1960,12 @@ entry:
 
 define float @sltof32_load(i64* %e) {
 ; GENERIC-LABEL: sltof32_load:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sltof32_load:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -1976,14 +1976,14 @@ entry:
 
 define void @f32tof64_loadstore() {
 ; GENERIC-LABEL: f32tof64_loadstore:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vmovsd %xmm0, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f32tof64_loadstore:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKX-NEXT:    vmovsd %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
@@ -1999,14 +1999,14 @@ entry:
 
 define void @f64tof32_loadstore() nounwind uwtable {
 ; GENERIC-LABEL: f64tof32_loadstore:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [6:0.50]
 ; GENERIC-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vmovss %xmm0, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: f64tof32_loadstore:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
 ; SKX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKX-NEXT:    vmovss %xmm0, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
@@ -2022,12 +2022,12 @@ entry:
 
 define double @long_to_double(i64 %x) {
 ; GENERIC-LABEL: long_to_double:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovq %rdi, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: long_to_double:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovq %rdi, %xmm0 # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = bitcast i64 %x to double
@@ -2036,12 +2036,12 @@ define double @long_to_double(i64 %x) {
 
 define i64 @double_to_long(double %x) {
 ; GENERIC-LABEL: double_to_long:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovq %xmm0, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: double_to_long:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovq %xmm0, %rax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = bitcast double %x to i64
@@ -2050,12 +2050,12 @@ define i64 @double_to_long(double %x) {
 
 define float @int_to_float(i32 %x) {
 ; GENERIC-LABEL: int_to_float:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovd %edi, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: int_to_float:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovd %edi, %xmm0 # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = bitcast i32 %x to float
@@ -2064,12 +2064,12 @@ define float @int_to_float(i32 %x) {
 
 define i32 @float_to_int(float %x) {
 ; GENERIC-LABEL: float_to_int:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovd %xmm0, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: float_to_int:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovd %xmm0, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = bitcast float %x to i32
@@ -2078,7 +2078,7 @@ define i32 @float_to_int(float %x) {
 
 define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
 ; GENERIC-LABEL: uito16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtudq2pd %ymm0, %zmm2 # sched: [4:1.00]
 ; GENERIC-NEXT:    vextractf64x4 $1, %zmm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtudq2pd %ymm0, %zmm1 # sched: [4:1.00]
@@ -2086,7 +2086,7 @@ define <16 x double> @uito16f64(<16 x i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm2 # sched: [7:1.00]
 ; SKX-NEXT:    vextractf64x4 $1, %zmm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm1 # sched: [7:1.00]
@@ -2098,12 +2098,12 @@ define <16 x double> @uito16f64(<16 x i3
 
 define <8 x float> @slto8f32(<8 x i64> %a) {
 ; GENERIC-LABEL: slto8f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2ps %zmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto8f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2ps %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <8 x i64> %a to <8 x float>
@@ -2112,14 +2112,14 @@ define <8 x float> @slto8f32(<8 x i64> %
 
 define <16 x float> @slto16f32(<16 x i64> %a) {
 ; GENERIC-LABEL: slto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2ps %zmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtqq2ps %zmm1, %ymm1 # sched: [4:1.00]
 ; GENERIC-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2ps %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvtqq2ps %zmm1, %ymm1 # sched: [7:1.00]
 ; SKX-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [3:1.00]
@@ -2130,12 +2130,12 @@ define <16 x float> @slto16f32(<16 x i64
 
 define <8 x double> @slto8f64(<8 x i64> %a) {
 ; GENERIC-LABEL: slto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2pd %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2pd %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <8 x i64> %a to <8 x double>
@@ -2144,13 +2144,13 @@ define <8 x double> @slto8f64(<8 x i64>
 
 define <16 x double> @slto16f64(<16 x i64> %a) {
 ; GENERIC-LABEL: slto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtqq2pd %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtqq2pd %zmm1, %zmm1 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: slto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtqq2pd %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vcvtqq2pd %zmm1, %zmm1 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2160,12 +2160,12 @@ define <16 x double> @slto16f64(<16 x i6
 
 define <8 x float> @ulto8f32(<8 x i64> %a) {
 ; GENERIC-LABEL: ulto8f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtuqq2ps %zmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ulto8f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtuqq2ps %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <8 x i64> %a to <8 x float>
@@ -2174,14 +2174,14 @@ define <8 x float> @ulto8f32(<8 x i64> %
 
 define <16 x float> @ulto16f32(<16 x i64> %a) {
 ; GENERIC-LABEL: ulto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtuqq2ps %zmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtuqq2ps %zmm1, %ymm1 # sched: [4:1.00]
 ; GENERIC-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ulto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtuqq2ps %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvtuqq2ps %zmm1, %ymm1 # sched: [7:1.00]
 ; SKX-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 # sched: [3:1.00]
@@ -2192,18 +2192,18 @@ define <16 x float> @ulto16f32(<16 x i64
 
 define <8 x double> @uito8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
 ; GENERIC-LABEL: uito8f64_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1} # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito8f64_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1} # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ; VLNOBW-LABEL: uito8f64_mask:
-; VLNOBW:       # BB#0:
+; VLNOBW:       # %bb.0:
 ; VLNOBW-NEXT:    kmovw %edi, %k1
 ; VLNOBW-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
 ; VLNOBW-NEXT:    ret{{[l|q]}}
@@ -2214,13 +2214,13 @@ define <8 x double> @uito8f64_mask(<8 x
 }
 define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
 ; GENERIC-LABEL: uito8f64_maskz:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z} # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito8f64_maskz:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z} # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2232,12 +2232,12 @@ define <8 x double> @uito8f64_maskz(<8 x
 
 define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
 ; GENERIC-LABEL: uito4f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtudq2pd %xmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito4f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtudq2pd %xmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <4 x i32> %a to <4 x double>
@@ -2246,12 +2246,12 @@ define <4 x double> @uito4f64(<4 x i32>
 
 define <16 x float> @uito16f32(<16 x i32> %a) nounwind {
 ; GENERIC-LABEL: uito16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtudq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtudq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <16 x i32> %a to <16 x float>
@@ -2260,12 +2260,12 @@ define <16 x float> @uito16f32(<16 x i32
 
 define <8 x double> @uito8f64(<8 x i32> %a) {
 ; GENERIC-LABEL: uito8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtudq2pd %ymm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <8 x i32> %a to <8 x double>
@@ -2274,12 +2274,12 @@ define <8 x double> @uito8f64(<8 x i32>
 
 define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
 ; GENERIC-LABEL: uito8f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtudq2ps %ymm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito8f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtudq2ps %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <8 x i32> %a to <8 x float>
@@ -2288,12 +2288,12 @@ define <8 x float> @uito8f32(<8 x i32> %
 
 define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
 ; GENERIC-LABEL: uito4f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtudq2ps %xmm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uito4f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtudq2ps %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp <4 x i32> %a to <4 x float>
@@ -2302,12 +2302,12 @@ define <4 x float> @uito4f32(<4 x i32> %
 
 define i32 @fptosi(float %a) nounwind {
 ; GENERIC-LABEL: fptosi:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttss2si %xmm0, %eax # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fptosi:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttss2si %xmm0, %eax # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptosi float %a to i32
@@ -2316,12 +2316,12 @@ define i32 @fptosi(float %a) nounwind {
 
 define i32 @fptoui(float %a) nounwind {
 ; GENERIC-LABEL: fptoui:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttss2usi %xmm0, %eax
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fptoui:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttss2usi %xmm0, %eax
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptoui float %a to i32
@@ -2330,12 +2330,12 @@ define i32 @fptoui(float %a) nounwind {
 
 define float @uitof32(i32 %a) nounwind {
 ; GENERIC-LABEL: uitof32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uitof32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp i32 %a to float
@@ -2344,12 +2344,12 @@ define float @uitof32(i32 %a) nounwind {
 
 define double @uitof64(i32 %a) nounwind {
 ; GENERIC-LABEL: uitof64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uitof64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = uitofp i32 %a to double
@@ -2358,7 +2358,7 @@ define double @uitof64(i32 %a) nounwind
 
 define <16 x float> @sbto16f32(<16 x i32> %a) {
 ; GENERIC-LABEL: sbto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0
 ; GENERIC-NEXT:    vpmovm2d %k0, %zmm0
@@ -2366,7 +2366,7 @@ define <16 x float> @sbto16f32(<16 x i32
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %zmm0
@@ -2379,13 +2379,13 @@ define <16 x float> @sbto16f32(<16 x i32
 
 define <16 x float> @scto16f32(<16 x i8> %a) {
 ; GENERIC-LABEL: scto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: scto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2395,13 +2395,13 @@ define <16 x float> @scto16f32(<16 x i8>
 
 define <16 x float> @ssto16f32(<16 x i16> %a) {
 ; GENERIC-LABEL: ssto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ssto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd %ymm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2411,13 +2411,13 @@ define <16 x float> @ssto16f32(<16 x i16
 
 define <8 x double> @ssto16f64(<8 x i16> %a) {
 ; GENERIC-LABEL: ssto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ssto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2427,7 +2427,7 @@ define <8 x double> @ssto16f64(<8 x i16>
 
 define <8 x double> @scto8f64(<8 x i8> %a) {
 ; GENERIC-LABEL: scto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
 ; GENERIC-NEXT:    vpslld $24, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpsrad $24, %ymm0, %ymm0 # sched: [1:1.00]
@@ -2435,7 +2435,7 @@ define <8 x double> @scto8f64(<8 x i8> %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: scto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
 ; SKX-NEXT:    vpslld $24, %ymm0, %ymm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpsrad $24, %ymm0, %ymm0 # sched: [1:0.50]
@@ -2447,7 +2447,7 @@ define <8 x double> @scto8f64(<8 x i8> %
 
 define <16 x double> @scto16f64(<16 x i8> %a) {
 ; GENERIC-LABEL: scto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbd %xmm0, %zmm1
 ; GENERIC-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2455,7 +2455,7 @@ define <16 x double> @scto16f64(<16 x i8
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: scto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm0, %zmm1 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2467,7 +2467,7 @@ define <16 x double> @scto16f64(<16 x i8
 
 define <16 x double> @sbto16f64(<16 x double> %a) {
 ; GENERIC-LABEL: sbto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltpd %zmm1, %zmm2, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcmpltpd %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -2478,7 +2478,7 @@ define <16 x double> @sbto16f64(<16 x do
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltpd %zmm1, %zmm2, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vcmpltpd %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -2494,7 +2494,7 @@ define <16 x double> @sbto16f64(<16 x do
 
 define <8 x double> @sbto8f64(<8 x double> %a) {
 ; GENERIC-LABEL: sbto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltpd %zmm0, %zmm1, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2d %k0, %ymm0
@@ -2502,7 +2502,7 @@ define <8 x double> @sbto8f64(<8 x doubl
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltpd %zmm0, %zmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %ymm0
@@ -2515,7 +2515,7 @@ define <8 x double> @sbto8f64(<8 x doubl
 
 define <8 x float> @sbto8f32(<8 x float> %a) {
 ; GENERIC-LABEL: sbto8f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltps %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2d %k0, %ymm0
@@ -2523,7 +2523,7 @@ define <8 x float> @sbto8f32(<8 x float>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto8f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltps %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %ymm0
@@ -2536,7 +2536,7 @@ define <8 x float> @sbto8f32(<8 x float>
 
 define <4 x float> @sbto4f32(<4 x float> %a) {
 ; GENERIC-LABEL: sbto4f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2d %k0, %xmm0
@@ -2544,7 +2544,7 @@ define <4 x float> @sbto4f32(<4 x float>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto4f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %xmm0
@@ -2557,7 +2557,7 @@ define <4 x float> @sbto4f32(<4 x float>
 
 define <4 x double> @sbto4f64(<4 x double> %a) {
 ; GENERIC-LABEL: sbto4f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltpd %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2d %k0, %xmm0
@@ -2565,7 +2565,7 @@ define <4 x double> @sbto4f64(<4 x doubl
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto4f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltpd %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %xmm0
@@ -2578,7 +2578,7 @@ define <4 x double> @sbto4f64(<4 x doubl
 
 define <2 x float> @sbto2f32(<2 x float> %a) {
 ; GENERIC-LABEL: sbto2f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2d %k0, %xmm0
@@ -2586,7 +2586,7 @@ define <2 x float> @sbto2f32(<2 x float>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto2f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltps %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %xmm0
@@ -2599,7 +2599,7 @@ define <2 x float> @sbto2f32(<2 x float>
 
 define <2 x double> @sbto2f64(<2 x double> %a) {
 ; GENERIC-LABEL: sbto2f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpltpd %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2q %k0, %xmm0
@@ -2607,7 +2607,7 @@ define <2 x double> @sbto2f64(<2 x doubl
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sbto2f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpltpd %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2q %k0, %xmm0
@@ -2620,13 +2620,13 @@ define <2 x double> @sbto2f64(<2 x doubl
 
 define <16 x float> @ucto16f32(<16 x i8> %a) {
 ; GENERIC-LABEL: ucto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ucto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2636,14 +2636,14 @@ define <16 x float> @ucto16f32(<16 x i8>
 
 define <8 x double> @ucto8f64(<8 x i8> %a) {
 ; GENERIC-LABEL: ucto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ucto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:0.50]
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
@@ -2654,13 +2654,13 @@ define <8 x double> @ucto8f64(<8 x i8> %
 
 define <16 x float> @swto16f32(<16 x i16> %a) {
 ; GENERIC-LABEL: swto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd %ymm0, %zmm0
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: swto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd %ymm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2670,13 +2670,13 @@ define <16 x float> @swto16f32(<16 x i16
 
 define <8 x double> @swto8f64(<8 x i16> %a) {
 ; GENERIC-LABEL: swto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: swto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2686,7 +2686,7 @@ define <8 x double> @swto8f64(<8 x i16>
 
 define <16 x double> @swto16f64(<16 x i16> %a) {
 ; GENERIC-LABEL: swto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd %ymm0, %zmm1
 ; GENERIC-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2694,7 +2694,7 @@ define <16 x double> @swto16f64(<16 x i1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: swto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd %ymm0, %zmm1 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2706,7 +2706,7 @@ define <16 x double> @swto16f64(<16 x i1
 
 define <16 x double> @ucto16f64(<16 x i8> %a) {
 ; GENERIC-LABEL: ucto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; GENERIC-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2714,7 +2714,7 @@ define <16 x double> @ucto16f64(<16 x i8
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ucto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2726,13 +2726,13 @@ define <16 x double> @ucto16f64(<16 x i8
 
 define <16 x float> @uwto16f32(<16 x i16> %a) {
 ; GENERIC-LABEL: uwto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uwto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2742,13 +2742,13 @@ define <16 x float> @uwto16f32(<16 x i16
 
 define <8 x double> @uwto8f64(<8 x i16> %a) {
 ; GENERIC-LABEL: uwto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uwto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2758,7 +2758,7 @@ define <8 x double> @uwto8f64(<8 x i16>
 
 define <16 x double> @uwto16f64(<16 x i16> %a) {
 ; GENERIC-LABEL: uwto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; GENERIC-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [1:1.00]
@@ -2766,7 +2766,7 @@ define <16 x double> @uwto16f64(<16 x i1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: uwto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vextracti64x4 $1, %zmm1, %ymm1 # sched: [3:1.00]
@@ -2778,12 +2778,12 @@ define <16 x double> @uwto16f64(<16 x i1
 
 define <16 x float> @sito16f32(<16 x i32> %a) {
 ; GENERIC-LABEL: sito16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sito16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = sitofp <16 x i32> %a to <16 x float>
@@ -2792,7 +2792,7 @@ define <16 x float> @sito16f32(<16 x i32
 
 define <16 x double> @sito16f64(<16 x i32> %a) {
 ; GENERIC-LABEL: sito16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm2 # sched: [4:1.00]
 ; GENERIC-NEXT:    vextractf64x4 $1, %zmm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcvtdq2pd %ymm0, %zmm1 # sched: [4:1.00]
@@ -2800,7 +2800,7 @@ define <16 x double> @sito16f64(<16 x i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sito16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm2 # sched: [7:1.00]
 ; SKX-NEXT:    vextractf64x4 $1, %zmm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm1 # sched: [7:1.00]
@@ -2812,13 +2812,13 @@ define <16 x double> @sito16f64(<16 x i3
 
 define <16 x float> @usto16f32(<16 x i16> %a) {
 ; GENERIC-LABEL: usto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; GENERIC-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: usto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2828,7 +2828,7 @@ define <16 x float> @usto16f32(<16 x i16
 
 define <16 x float> @ubto16f32(<16 x i32> %a) {
 ; GENERIC-LABEL: ubto16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2836,7 +2836,7 @@ define <16 x float> @ubto16f32(<16 x i32
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -2849,7 +2849,7 @@ define <16 x float> @ubto16f32(<16 x i32
 
 define <16 x double> @ubto16f64(<16 x i32> %a) {
 ; GENERIC-LABEL: ubto16f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
 ; GENERIC-NEXT:    movl {{.*}}(%rip), %eax # sched: [5:0.50]
@@ -2861,7 +2861,7 @@ define <16 x double> @ubto16f64(<16 x i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    movl {{.*}}(%rip), %eax # sched: [5:0.50]
@@ -2878,7 +2878,7 @@ define <16 x double> @ubto16f64(<16 x i3
 
 define <8 x float> @ubto8f32(<8 x i32> %a) {
 ; GENERIC-LABEL: ubto8f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -2886,7 +2886,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto8f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
@@ -2899,7 +2899,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
 
 define <8 x double> @ubto8f64(<8 x i32> %a) {
 ; GENERIC-LABEL: ubto8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -2907,7 +2907,7 @@ define <8 x double> @ubto8f64(<8 x i32>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
@@ -2920,7 +2920,7 @@ define <8 x double> @ubto8f64(<8 x i32>
 
 define <4 x float> @ubto4f32(<4 x i32> %a) {
 ; GENERIC-LABEL: ubto4f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -2928,7 +2928,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto4f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
@@ -2941,7 +2941,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
 
 define <4 x double> @ubto4f64(<4 x i32> %a) {
 ; GENERIC-LABEL: ubto4f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -2949,7 +2949,7 @@ define <4 x double> @ubto4f64(<4 x i32>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto4f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
@@ -2962,7 +2962,7 @@ define <4 x double> @ubto4f64(<4 x i32>
 
 define <2 x float> @ubto2f32(<2 x i32> %a) {
 ; GENERIC-LABEL: ubto2f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
 ; GENERIC-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
@@ -2971,7 +2971,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto2f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
 ; SKX-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
@@ -2985,7 +2985,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
 
 define <2 x double> @ubto2f64(<2 x i32> %a) {
 ; GENERIC-LABEL: ubto2f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
 ; GENERIC-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
@@ -2994,7 +2994,7 @@ define <2 x double> @ubto2f64(<2 x i32>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ubto2f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
 ; SKX-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
@@ -3008,14 +3008,14 @@ define <2 x double> @ubto2f64(<2 x i32>
 
 define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x8mem_to_8x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x8mem_to_8x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [9:1.00]
@@ -3028,14 +3028,14 @@ define <8 x i16> @zext_8x8mem_to_8x16(<8
 
 define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_8x8mem_to_8x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxbw (%rdi), %xmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x8mem_to_8x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbw (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3049,14 +3049,14 @@ define <8 x i16> @sext_8x8mem_to_8x16(<8
 
 define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_16x8mem_to_16x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x8mem_to_16x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
@@ -3069,14 +3069,14 @@ define <16 x i16> @zext_16x8mem_to_16x16
 
 define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_16x8mem_to_16x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxbw (%rdi), %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x8mem_to_16x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbw (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3089,12 +3089,12 @@ define <16 x i16> @sext_16x8mem_to_16x16
 
 define <16 x i16> @zext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
 ; GENERIC-LABEL: zext_16x8_to_16x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x8_to_16x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = zext <16 x i8> %a to <16 x i16>
@@ -3103,14 +3103,14 @@ define <16 x i16> @zext_16x8_to_16x16(<1
 
 define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_16x8_to_16x16_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x8_to_16x16_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
@@ -3122,12 +3122,12 @@ define <16 x i16> @zext_16x8_to_16x16_ma
 
 define <16 x i16> @sext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
 ; GENERIC-LABEL: sext_16x8_to_16x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbw %xmm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x8_to_16x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = sext <16 x i8> %a to <16 x i16>
@@ -3136,14 +3136,14 @@ define <16 x i16> @sext_16x8_to_16x16(<1
 
 define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_16x8_to_16x16_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovsxbw %xmm0, %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x8_to_16x16_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbw %xmm0, %ymm0 {%k1} {z} # sched: [3:1.00]
@@ -3155,14 +3155,14 @@ define <16 x i16> @sext_16x8_to_16x16_ma
 
 define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_32x8mem_to_32x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %ymm0, %k1
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32x8mem_to_32x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm0, %ymm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %ymm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero sched: [10:1.00]
@@ -3175,14 +3175,14 @@ define <32 x i16> @zext_32x8mem_to_32x16
 
 define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_32x8mem_to_32x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %ymm0, %k1
 ; GENERIC-NEXT:    vpmovsxbw (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_32x8mem_to_32x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm0, %ymm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %ymm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbw (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3195,12 +3195,12 @@ define <32 x i16> @sext_32x8mem_to_32x16
 
 define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
 ; GENERIC-LABEL: zext_32x8_to_32x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32x8_to_32x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = zext <32 x i8> %a to <32 x i16>
@@ -3209,14 +3209,14 @@ define <32 x i16> @zext_32x8_to_32x16(<3
 
 define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_32x8_to_32x16_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %ymm1, %ymm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %ymm1, %k1
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32x8_to_32x16_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm1, %ymm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %ymm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero sched: [3:1.00]
@@ -3228,12 +3228,12 @@ define <32 x i16> @zext_32x8_to_32x16_ma
 
 define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
 ; GENERIC-LABEL: sext_32x8_to_32x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbw %ymm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_32x8_to_32x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbw %ymm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = sext <32 x i8> %a to <32 x i16>
@@ -3242,14 +3242,14 @@ define <32 x i16> @sext_32x8_to_32x16(<3
 
 define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_32x8_to_32x16_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %ymm1, %ymm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %ymm1, %k1
 ; GENERIC-NEXT:    vpmovsxbw %ymm0, %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_32x8_to_32x16_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm1, %ymm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %ymm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbw %ymm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -3261,14 +3261,14 @@ define <32 x i16> @sext_32x8_to_32x16_ma
 
 define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_4x8mem_to_4x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4x8mem_to_4x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [9:1.00]
@@ -3281,14 +3281,14 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4
 
 define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_4x8mem_to_4x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxbd (%rdi), %xmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x8mem_to_4x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxbd (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3301,14 +3301,14 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4
 
 define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x8mem_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x8mem_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
@@ -3321,14 +3321,14 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8
 
 define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_8x8mem_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxbd (%rdi), %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x8mem_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbd (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3341,14 +3341,14 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8
 
 define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_16x8mem_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x8mem_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero sched: [10:1.00]
@@ -3361,14 +3361,14 @@ define <16 x i32> @zext_16x8mem_to_16x32
 
 define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_16x8mem_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxbd (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x8mem_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbd (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3381,14 +3381,14 @@ define <16 x i32> @sext_16x8mem_to_16x32
 
 define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_16x8_to_16x32_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x8_to_16x32_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
@@ -3400,14 +3400,14 @@ define <16 x i32> @zext_16x8_to_16x32_ma
 
 define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_16x8_to_16x32_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovsxbd %xmm0, %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x8_to_16x32_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbd %xmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -3419,12 +3419,12 @@ define <16 x i32> @sext_16x8_to_16x32_ma
 
 define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
 ; GENERIC-LABEL: zext_16x8_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x8_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = zext <16 x i8> %i to <16 x i32>
@@ -3433,12 +3433,12 @@ define <16 x i32> @zext_16x8_to_16x32(<1
 
 define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
 ; GENERIC-LABEL: sext_16x8_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x8_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x = sext <16 x i8> %i to <16 x i32>
@@ -3447,14 +3447,14 @@ define <16 x i32> @sext_16x8_to_16x32(<1
 
 define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_2x8mem_to_2x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxbq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_2x8mem_to_2x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxbq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [9:1.00]
@@ -3466,14 +3466,14 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2
 }
 define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_2x8mem_to_2x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %xmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_2x8mem_to_2x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxbq (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3485,12 +3485,12 @@ define <2 x i64> @sext_2x8mem_to_2x64mas
 }
 define <2 x i64> @sext_2x8mem_to_2x64(<2 x i8> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_2x8mem_to_2x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %xmm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_2x8mem_to_2x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbq (%rdi), %xmm0 # sched: [6:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <2 x i8>,<2 x i8> *%i,align 1
@@ -3500,14 +3500,14 @@ define <2 x i64> @sext_2x8mem_to_2x64(<2
 
 define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_4x8mem_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxbq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4x8mem_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxbq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
@@ -3520,14 +3520,14 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4
 
 define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_4x8mem_to_4x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x8mem_to_4x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxbq (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3540,12 +3540,12 @@ define <4 x i64> @sext_4x8mem_to_4x64mas
 
 define <4 x i64> @sext_4x8mem_to_4x64(<4 x i8> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_4x8mem_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %ymm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x8mem_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbq (%rdi), %ymm0 # sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <4 x i8>,<4 x i8> *%i,align 1
@@ -3555,14 +3555,14 @@ define <4 x i64> @sext_4x8mem_to_4x64(<4
 
 define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x8mem_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x8mem_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
@@ -3575,14 +3575,14 @@ define <8 x i64> @zext_8x8mem_to_8x64(<8
 
 define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_8x8mem_to_8x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x8mem_to_8x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxbq (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3595,12 +3595,12 @@ define <8 x i64> @sext_8x8mem_to_8x64mas
 
 define <8 x i64> @sext_8x8mem_to_8x64(<8 x i8> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_8x8mem_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x8mem_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbq (%rdi), %zmm0 # sched: [10:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <8 x i8>,<8 x i8> *%i,align 1
@@ -3610,14 +3610,14 @@ define <8 x i64> @sext_8x8mem_to_8x64(<8
 
 define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_4x16mem_to_4x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4x16mem_to_4x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [9:1.00]
@@ -3630,14 +3630,14 @@ define <4 x i32> @zext_4x16mem_to_4x32(<
 
 define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_4x16mem_to_4x32mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxwd (%rdi), %xmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x16mem_to_4x32mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxwd (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3650,12 +3650,12 @@ define <4 x i32> @sext_4x16mem_to_4x32ma
 
 define <4 x i32> @sext_4x16mem_to_4x32(<4 x i16> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_4x16mem_to_4x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd (%rdi), %xmm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x16mem_to_4x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd (%rdi), %xmm0 # sched: [6:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <4 x i16>,<4 x i16> *%i,align 1
@@ -3666,14 +3666,14 @@ define <4 x i32> @sext_4x16mem_to_4x32(<
 
 define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x16mem_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x16mem_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [10:1.00]
@@ -3686,14 +3686,14 @@ define <8 x i32> @zext_8x16mem_to_8x32(<
 
 define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_8x16mem_to_8x32mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxwd (%rdi), %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x16mem_to_8x32mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxwd (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3706,12 +3706,12 @@ define <8 x i32> @sext_8x16mem_to_8x32ma
 
 define <8 x i32> @sext_8x16mem_to_8x32(<8 x i16> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_8x16mem_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd (%rdi), %ymm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x16mem_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd (%rdi), %ymm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <8 x i16>,<8 x i16> *%i,align 1
@@ -3721,14 +3721,14 @@ define <8 x i32> @sext_8x16mem_to_8x32(<
 
 define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x16_to_8x32mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x16_to_8x32mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
@@ -3740,12 +3740,12 @@ define <8 x i32> @zext_8x16_to_8x32mask(
 
 define <8 x i32> @zext_8x16_to_8x32(<8 x i16> %a ) nounwind readnone {
 ; GENERIC-LABEL: zext_8x16_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x16_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = zext <8 x i16> %a to <8 x i32>
@@ -3754,14 +3754,14 @@ define <8 x i32> @zext_8x16_to_8x32(<8 x
 
 define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_16x16mem_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x16mem_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
@@ -3774,14 +3774,14 @@ define <16 x i32> @zext_16x16mem_to_16x3
 
 define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_16x16mem_to_16x32mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxwd (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x16mem_to_16x32mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxwd (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3794,12 +3794,12 @@ define <16 x i32> @sext_16x16mem_to_16x3
 
 define <16 x i32> @sext_16x16mem_to_16x32(<16 x i16> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_16x16mem_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwd (%rdi), %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16x16mem_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwd (%rdi), %zmm0 # sched: [10:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <16 x i16>,<16 x i16> *%i,align 1
@@ -3808,14 +3808,14 @@ define <16 x i32> @sext_16x16mem_to_16x3
 }
 define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_16x16_to_16x32mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x16_to_16x32mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
@@ -3827,12 +3827,12 @@ define <16 x i32> @zext_16x16_to_16x32ma
 
 define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %a ) nounwind readnone {
 ; GENERIC-LABEL: zext_16x16_to_16x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x16_to_16x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = zext <16 x i16> %a to <16 x i32>
@@ -3841,14 +3841,14 @@ define <16 x i32> @zext_16x16_to_16x32(<
 
 define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_2x16mem_to_2x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxwq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_2x16mem_to_2x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [9:1.00]
@@ -3861,14 +3861,14 @@ define <2 x i64> @zext_2x16mem_to_2x64(<
 
 define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_2x16mem_to_2x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxwq (%rdi), %xmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_2x16mem_to_2x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxwq (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -3881,12 +3881,12 @@ define <2 x i64> @sext_2x16mem_to_2x64ma
 
 define <2 x i64> @sext_2x16mem_to_2x64(<2 x i16> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_2x16mem_to_2x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwq (%rdi), %xmm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_2x16mem_to_2x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwq (%rdi), %xmm0 # sched: [6:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <2 x i16>,<2 x i16> *%i,align 1
@@ -3896,14 +3896,14 @@ define <2 x i64> @sext_2x16mem_to_2x64(<
 
 define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_4x16mem_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxwq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4x16mem_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [10:1.00]
@@ -3916,14 +3916,14 @@ define <4 x i64> @zext_4x16mem_to_4x64(<
 
 define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_4x16mem_to_4x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxwq (%rdi), %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x16mem_to_4x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxwq (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -3936,12 +3936,12 @@ define <4 x i64> @sext_4x16mem_to_4x64ma
 
 define <4 x i64> @sext_4x16mem_to_4x64(<4 x i16> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_4x16mem_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwq (%rdi), %ymm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x16mem_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwq (%rdi), %ymm0 # sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <4 x i16>,<4 x i16> *%i,align 1
@@ -3951,14 +3951,14 @@ define <4 x i64> @sext_4x16mem_to_4x64(<
 
 define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x16mem_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x16mem_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
@@ -3971,14 +3971,14 @@ define <8 x i64> @zext_8x16mem_to_8x64(<
 
 define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_8x16mem_to_8x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxwq (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x16mem_to_8x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxwq (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -3991,12 +3991,12 @@ define <8 x i64> @sext_8x16mem_to_8x64ma
 
 define <8 x i64> @sext_8x16mem_to_8x64(<8 x i16> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_8x16mem_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxwq (%rdi), %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x16mem_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxwq (%rdi), %zmm0 # sched: [10:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <8 x i16>,<8 x i16> *%i,align 1
@@ -4006,14 +4006,14 @@ define <8 x i64> @sext_8x16mem_to_8x64(<
 
 define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x16_to_8x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x16_to_8x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
@@ -4025,12 +4025,12 @@ define <8 x i64> @zext_8x16_to_8x64mask(
 
 define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %a) nounwind readnone {
 ; GENERIC-LABEL: zext_8x16_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x16_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %ret   = zext <8 x i16> %a to <8 x i64>
@@ -4039,14 +4039,14 @@ define <8 x i64> @zext_8x16_to_8x64(<8 x
 
 define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_2x32mem_to_2x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxdq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_2x32mem_to_2x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxdq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero sched: [9:1.00]
@@ -4059,14 +4059,14 @@ define <2 x i64> @zext_2x32mem_to_2x64(<
 
 define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_2x32mem_to_2x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxdq (%rdi), %xmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_2x32mem_to_2x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxdq (%rdi), %xmm0 {%k1} {z} # sched: [9:1.00]
@@ -4079,12 +4079,12 @@ define <2 x i64> @sext_2x32mem_to_2x64ma
 
 define <2 x i64> @sext_2x32mem_to_2x64(<2 x i32> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_2x32mem_to_2x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxdq (%rdi), %xmm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_2x32mem_to_2x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxdq (%rdi), %xmm0 # sched: [6:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <2 x i32>,<2 x i32> *%i,align 1
@@ -4094,14 +4094,14 @@ define <2 x i64> @sext_2x32mem_to_2x64(<
 
 define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_4x32mem_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4x32mem_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [10:1.00]
@@ -4114,14 +4114,14 @@ define <4 x i64> @zext_4x32mem_to_4x64(<
 
 define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_4x32mem_to_4x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovsxdq (%rdi), %ymm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x32mem_to_4x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovsxdq (%rdi), %ymm0 {%k1} {z} # sched: [10:1.00]
@@ -4134,12 +4134,12 @@ define <4 x i64> @sext_4x32mem_to_4x64ma
 
 define <4 x i64> @sext_4x32mem_to_4x64(<4 x i32> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_4x32mem_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxdq (%rdi), %ymm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x32mem_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxdq (%rdi), %ymm0 # sched: [9:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <4 x i32>,<4 x i32> *%i,align 1
@@ -4149,12 +4149,12 @@ define <4 x i64> @sext_4x32mem_to_4x64(<
 
 define <4 x i64> @sext_4x32_to_4x64(<4 x i32> %a) nounwind readnone {
 ; GENERIC-LABEL: sext_4x32_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxdq %xmm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_4x32_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = sext <4 x i32> %a to <4 x i64>
@@ -4163,14 +4163,14 @@ define <4 x i64> @sext_4x32_to_4x64(<4 x
 
 define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_4x32_to_4x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm1, %xmm1, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4x32_to_4x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
@@ -4182,14 +4182,14 @@ define <4 x i64> @zext_4x32_to_4x64mask(
 
 define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x32mem_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x32mem_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [10:1.00]
@@ -4202,14 +4202,14 @@ define <8 x i64> @zext_8x32mem_to_8x64(<
 
 define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: sext_8x32mem_to_8x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k1
 ; GENERIC-NEXT:    vpmovsxdq (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x32mem_to_8x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovsxdq (%rdi), %zmm0 {%k1} {z} # sched: [10:1.00]
@@ -4222,12 +4222,12 @@ define <8 x i64> @sext_8x32mem_to_8x64ma
 
 define <8 x i64> @sext_8x32mem_to_8x64(<8 x i32> *%i) nounwind readnone {
 ; GENERIC-LABEL: sext_8x32mem_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxdq (%rdi), %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x32mem_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxdq (%rdi), %zmm0 # sched: [10:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a   = load <8 x i32>,<8 x i32> *%i,align 1
@@ -4237,12 +4237,12 @@ define <8 x i64> @sext_8x32mem_to_8x64(<
 
 define <8 x i64> @sext_8x32_to_8x64(<8 x i32> %a) nounwind readnone {
 ; GENERIC-LABEL: sext_8x32_to_8x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxdq %ymm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8x32_to_8x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxdq %ymm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %x   = sext <8 x i32> %a to <8 x i64>
@@ -4251,14 +4251,14 @@ define <8 x i64> @sext_8x32_to_8x64(<8 x
 
 define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: zext_8x32_to_8x64mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x32_to_8x64mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero sched: [3:1.00]
@@ -4269,12 +4269,12 @@ define <8 x i64> @zext_8x32_to_8x64mask(
 }
 define <8 x float> @fptrunc_test(<8 x double> %a) nounwind readnone {
 ; GENERIC-LABEL: fptrunc_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtpd2ps %zmm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fptrunc_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtpd2ps %zmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fptrunc <8 x double> %a to <8 x float>
@@ -4283,12 +4283,12 @@ define <8 x float> @fptrunc_test(<8 x do
 
 define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
 ; GENERIC-LABEL: fpext_test:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtps2pd %ymm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: fpext_test:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtps2pd %ymm0, %zmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = fpext <8 x float> %a to <8 x double>
@@ -4297,13 +4297,13 @@ define <8 x double> @fpext_test(<8 x flo
 
 define   <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
 ; GENERIC-LABEL: zext_16i1_to_16xi32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16i1_to_16xi32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4314,13 +4314,13 @@ define   <16 x i32> @zext_16i1_to_16xi32
 
 define   <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
 ; GENERIC-LABEL: zext_8i1_to_8xi64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8i1_to_8xi64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4331,7 +4331,7 @@ define   <8 x i64> @zext_8i1_to_8xi64(i8
 
 define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
 ; GENERIC-LABEL: trunc_16i8_to_16i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k0
 ; GENERIC-NEXT:    kmovd %k0, %eax
@@ -4339,7 +4339,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: trunc_16i8_to_16i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
@@ -4352,7 +4352,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
 
 define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
 ; GENERIC-LABEL: trunc_16i32_to_16i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vptestmd %zmm0, %zmm0, %k0 # sched: [1:1.00]
 ; GENERIC-NEXT:    kmovd %k0, %eax
@@ -4361,7 +4361,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: trunc_16i32_to_16i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
@@ -4375,7 +4375,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
 
 define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
 ; GENERIC-LABEL: trunc_4i32_to_4i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpslld $31, %xmm1, %xmm0 # sched: [1:1.00]
@@ -4384,7 +4384,7 @@ define <4 x i32> @trunc_4i32_to_4i1(<4 x
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: trunc_4i32_to_4i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm0 # sched: [1:0.50]
@@ -4401,7 +4401,7 @@ define <4 x i32> @trunc_4i32_to_4i1(<4 x
 
 define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
 ; GENERIC-LABEL: trunc_8i16_to_8i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k0
 ; GENERIC-NEXT:    kmovd %k0, %eax
@@ -4409,7 +4409,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: trunc_8i16_to_8i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
@@ -4422,13 +4422,13 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
 
 define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
 ; GENERIC-LABEL: sext_8i1_8i32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpled %ymm0, %ymm1, %k0
 ; GENERIC-NEXT:    vpmovm2d %k0, %ymm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8i1_8i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpled %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %ymm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4441,7 +4441,7 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32
 
 define i16 @trunc_i32_to_i1(i32 %a) {
 ; GENERIC-LABEL: trunc_i32_to_i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movw $-4, %ax # sched: [1:0.33]
 ; GENERIC-NEXT:    kmovd %eax, %k0
 ; GENERIC-NEXT:    kshiftrw $1, %k0, %k0
@@ -4454,7 +4454,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: trunc_i32_to_i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movw $-4, %ax # sched: [1:0.25]
 ; SKX-NEXT:    kmovd %eax, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kshiftrw $1, %k0, %k0 # sched: [3:1.00]
@@ -4473,14 +4473,14 @@ define i16 @trunc_i32_to_i1(i32 %a) {
 
 define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
 ; GENERIC-LABEL: sext_8i1_8i16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpgtd %ymm0, %ymm1, %k0
 ; GENERIC-NEXT:    vpmovm2w %k0, %xmm0
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8i1_8i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2w %k0, %xmm0
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -4492,13 +4492,13 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32
 
 define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
 ; GENERIC-LABEL: sext_16i1_16i32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0
 ; GENERIC-NEXT:    vpmovm2d %k0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_16i1_16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %zmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4509,13 +4509,13 @@ define <16 x i32> @sext_16i1_16i32(<16 x
 
 define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
 ; GENERIC-LABEL: sext_8i1_8i64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpgtd %ymm0, %ymm1, %k0
 ; GENERIC-NEXT:    vpmovm2q %k0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: sext_8i1_8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2q %k0, %zmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4526,14 +4526,14 @@ define <8 x i64> @sext_8i1_8i64(<8 x i32
 
 define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
 ; GENERIC-LABEL: extload_v8i64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovsxbq (%rdi), %zmm0
 ; GENERIC-NEXT:    vmovdqa64 %zmm0, (%rsi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: extload_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbq (%rdi), %zmm0 # sched: [10:1.00]
 ; SKX-NEXT:    vmovdqa64 %zmm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -4546,7 +4546,7 @@ define void @extload_v8i64(<8 x i8>* %a,
 
 define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: test21:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %zmm2, %zmm2 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovb2m %zmm2, %k1
 ; GENERIC-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -4555,7 +4555,7 @@ define <64 x i16> @test21(<64 x i16> %x
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test21:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %zmm2, %zmm2 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %zmm2, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -4568,12 +4568,12 @@ define <64 x i16> @test21(<64 x i16> %x
 
 define <16 x i16> @shuffle_zext_16x8_to_16x16(<16 x i8> %a) nounwind readnone {
 ; GENERIC-LABEL: shuffle_zext_16x8_to_16x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: shuffle_zext_16x8_to_16x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <32 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16, i32 8, i32 16, i32 9, i32 16, i32 10, i32 16, i32 11, i32 16, i32 12, i32 16, i32 13, i32 16, i32 14, i32 16, i32 15, i32 16>
@@ -4583,14 +4583,14 @@ define <16 x i16> @shuffle_zext_16x8_to_
 
 define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm1, %k1
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
@@ -4603,12 +4603,12 @@ define <16 x i16> @shuffle_zext_16x8_to_
 
 define <16 x i16> @zext_32x8_to_16x16(<32 x i8> %a) {
 ; GENERIC-LABEL: zext_32x8_to_16x16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32x8_to_16x16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 1, i32 32, i32 2, i32 32, i32 3, i32 32, i32 4, i32 32, i32 5, i32 32, i32 6, i32 32, i32 7, i32 32, i32 8, i32 32, i32 9, i32 32, i32 10, i32 32, i32 11, i32 32, i32 12, i32 32, i32 13, i32 32, i32 14, i32 32, i32 15, i32 32>
@@ -4618,12 +4618,12 @@ define <16 x i16> @zext_32x8_to_16x16(<3
 
 define <8 x i32> @zext_32x8_to_8x32(<32 x i8> %a) {
 ; GENERIC-LABEL: zext_32x8_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32x8_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 4, i32 32, i32 32, i32 32, i32 5, i32 32, i32 32, i32 32, i32 6, i32 32, i32 32, i32 32, i32 7, i32 32, i32 32, i32 32>
@@ -4633,12 +4633,12 @@ define <8 x i32> @zext_32x8_to_8x32(<32
 
 define <4 x i64> @zext_32x8_to_4x64(<32 x i8> %a) {
 ; GENERIC-LABEL: zext_32x8_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32x8_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
@@ -4648,12 +4648,12 @@ define <4 x i64> @zext_32x8_to_4x64(<32
 
 define <8 x i32> @zext_16x16_to_8x32(<16 x i16> %a) {
 ; GENERIC-LABEL: zext_16x16_to_8x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x16_to_8x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16>
@@ -4663,12 +4663,12 @@ define <8 x i32> @zext_16x16_to_8x32(<16
 
 define <4 x i64> @zext_16x16_to_4x64(<16 x i16> %a) {
 ; GENERIC-LABEL: zext_16x16_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16x16_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16>
@@ -4678,12 +4678,12 @@ define <4 x i64> @zext_16x16_to_4x64(<16
 
 define <4 x i64> @zext_8x32_to_4x64(<8 x i32> %a) {
 ; GENERIC-LABEL: zext_8x32_to_4x64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_8x32_to_4x64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %1 = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8>
@@ -4693,13 +4693,13 @@ define <4 x i64> @zext_8x32_to_4x64(<8 x
 
 define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
 ; GENERIC-LABEL: zext_64xi1_to_64xi8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpeqb %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_64xi1_to_64xi8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpeqb %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4710,13 +4710,13 @@ define <64 x i8> @zext_64xi1_to_64xi8(<6
 
 define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
 ; GENERIC-LABEL: zext_32xi1_to_32xi16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32xi1_to_32xi16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4727,13 +4727,13 @@ define <32 x i16> @zext_32xi1_to_32xi16(
 
 define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
 ; GENERIC-LABEL: zext_16xi1_to_16xi16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpeqw %ymm1, %ymm0, %k1
 ; GENERIC-NEXT:    vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_16xi1_to_16xi16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpeqw %ymm1, %ymm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4745,13 +4745,13 @@ define <16 x i16> @zext_16xi1_to_16xi16(
 
 define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
 ; GENERIC-LABEL: zext_32xi1_to_32xi8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_32xi1_to_32xi8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4762,7 +4762,7 @@ define <32 x i8> @zext_32xi1_to_32xi8(<3
 
 define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
 ; GENERIC-LABEL: zext_4xi1_to_4x32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] sched: [6:0.50]
 ; GENERIC-NEXT:    vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4771,7 +4771,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_4xi1_to_4x32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] sched: [6:0.50]
 ; SKX-NEXT:    vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4785,7 +4785,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x
 
 define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
 ; GENERIC-LABEL: zext_2xi1_to_2xi64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] sched: [6:0.50]
 ; GENERIC-NEXT:    vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4794,7 +4794,7 @@ define <2 x i64> @zext_2xi1_to_2xi64(<2
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_2xi1_to_2xi64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] sched: [6:0.50]
 ; SKX-NEXT:    vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
@@ -4808,13 +4808,13 @@ define <2 x i64> @zext_2xi1_to_2xi64(<2
 
 define <16 x float> @test_x86_fmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; GENERIC-LABEL: test_x86_fmadd_ps_z:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vaddps %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmadd_ps_z:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddps %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4825,13 +4825,13 @@ define <16 x float> @test_x86_fmadd_ps_z
 
 define <16 x float> @test_x86_fmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; GENERIC-LABEL: test_x86_fmsub_ps_z:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vsubps %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmsub_ps_z:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubps %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4842,13 +4842,13 @@ define <16 x float> @test_x86_fmsub_ps_z
 
 define <16 x float> @test_x86_fnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; GENERIC-LABEL: test_x86_fnmadd_ps_z:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vsubps %zmm0, %zmm2, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fnmadd_ps_z:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubps %zmm0, %zmm2, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4859,14 +4859,14 @@ define <16 x float> @test_x86_fnmadd_ps_
 
 define <16 x float> @test_x86_fnmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; GENERIC-LABEL: test_x86_fnmsub_ps_z:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vsubps %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fnmsub_ps_z:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vsubps %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
@@ -4882,13 +4882,13 @@ define <16 x float> @test_x86_fnmsub_ps_
 
 define <8 x double> @test_x86_fmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; GENERIC-LABEL: test_x86_fmadd_pd_z:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulpd %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vaddpd %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmadd_pd_z:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulpd %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddpd %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4899,13 +4899,13 @@ define <8 x double> @test_x86_fmadd_pd_z
 
 define <8 x double> @test_x86_fmsub_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; GENERIC-LABEL: test_x86_fmsub_pd_z:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulpd %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vsubpd %zmm2, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmsub_pd_z:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulpd %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubpd %zmm2, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4916,13 +4916,13 @@ define <8 x double> @test_x86_fmsub_pd_z
 
 define double @test_x86_fmsub_213(double %a0, double %a1, double %a2) {
 ; GENERIC-LABEL: test_x86_fmsub_213:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vsubsd %xmm2, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmsub_213:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubsd %xmm2, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4933,13 +4933,13 @@ define double @test_x86_fmsub_213(double
 
 define double @test_x86_fmsub_213_m(double %a0, double %a1, double * %a2_ptr) {
 ; GENERIC-LABEL: test_x86_fmsub_213_m:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmsub_213_m:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4951,13 +4951,13 @@ define double @test_x86_fmsub_213_m(doub
 
 define double @test_x86_fmsub_231_m(double %a0, double %a1, double * %a2_ptr) {
 ; GENERIC-LABEL: test_x86_fmsub_231_m:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulsd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
 ; GENERIC-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmsub_231_m:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulsd (%rdi), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4969,13 +4969,13 @@ define double @test_x86_fmsub_231_m(doub
 
 define <16 x float> @test231_br(<16 x float> %a1, <16 x float> %a2) nounwind {
 ; GENERIC-LABEL: test231_br:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    vaddps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test231_br:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    vaddps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -4986,13 +4986,13 @@ define <16 x float> @test231_br(<16 x fl
 
 define <16 x float> @test213_br(<16 x float> %a1, <16 x float> %a2) nounwind {
 ; GENERIC-LABEL: test213_br:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test213_br:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %zmm1, %zmm0, %zmm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5004,7 +5004,7 @@ define <16 x float> @test213_br(<16 x fl
 ;mask (a*c+b , a)
 define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
 ; GENERIC-LABEL: test_x86_fmadd132_ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm2, %k1
 ; GENERIC-NEXT:    vmulps (%rdi), %zmm0, %zmm2 # sched: [9:1.00]
@@ -5012,7 +5012,7 @@ define <16 x float> @test_x86_fmadd132_p
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmadd132_ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm2, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vmulps (%rdi), %zmm0, %zmm2 # sched: [11:0.50]
@@ -5028,7 +5028,7 @@ define <16 x float> @test_x86_fmadd132_p
 ;mask (a*c+b , b)
 define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
 ; GENERIC-LABEL: test_x86_fmadd231_ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm2, %k1
 ; GENERIC-NEXT:    vmulps (%rdi), %zmm0, %zmm0 # sched: [9:1.00]
@@ -5037,7 +5037,7 @@ define <16 x float> @test_x86_fmadd231_p
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmadd231_ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm2, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vmulps (%rdi), %zmm0, %zmm0 # sched: [11:0.50]
@@ -5054,7 +5054,7 @@ define <16 x float> @test_x86_fmadd231_p
 ;mask (b*a+c , b)
 define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
 ; GENERIC-LABEL: test_x86_fmadd213_ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm2, %k1
 ; GENERIC-NEXT:    vmulps %zmm0, %zmm1, %zmm0 # sched: [5:1.00]
@@ -5063,7 +5063,7 @@ define <16 x float> @test_x86_fmadd213_p
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_x86_fmadd213_ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm2, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vmulps %zmm0, %zmm1, %zmm0 # sched: [4:0.33]
@@ -5079,13 +5079,13 @@ define <16 x float> @test_x86_fmadd213_p
 
 define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpandd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpandd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vpandq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5099,13 +5099,13 @@ entry:
 
 define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpandnd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpandnd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vpandnq %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5121,13 +5121,13 @@ entry:
 
 define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpord:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpord:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vporq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5141,13 +5141,13 @@ entry:
 
 define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpxord:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpxord:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vpxorq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5161,13 +5161,13 @@ entry:
 
 define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpandq:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpandq:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vpandq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5180,13 +5180,13 @@ entry:
 
 define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpandnq:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpandnq:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vpandnq %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5200,13 +5200,13 @@ entry:
 
 define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vporq:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vporq %zmm1, %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vporq:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vporq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5219,13 +5219,13 @@ entry:
 
 define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; GENERIC-LABEL: vpxorq:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpxorq:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    vpxorq %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5238,12 +5238,12 @@ entry:
 
 define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; GENERIC-LABEL: and_v64i8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: and_v64i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = and <64 x i8> %a, %b
@@ -5252,12 +5252,12 @@ define <64 x i8> @and_v64i8(<64 x i8> %a
 
 define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; GENERIC-LABEL: andn_v64i8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandnps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: andn_v64i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandnps %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b2 = xor <64 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -5270,12 +5270,12 @@ define <64 x i8> @andn_v64i8(<64 x i8> %
 
 define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; GENERIC-LABEL: or_v64i8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: or_v64i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = or <64 x i8> %a, %b
@@ -5284,12 +5284,12 @@ define <64 x i8> @or_v64i8(<64 x i8> %a,
 
 define <64 x i8> @xor_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; GENERIC-LABEL: xor_v64i8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: xor_v64i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = xor <64 x i8> %a, %b
@@ -5298,12 +5298,12 @@ define <64 x i8> @xor_v64i8(<64 x i8> %a
 
 define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; GENERIC-LABEL: and_v32i16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: and_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = and <32 x i16> %a, %b
@@ -5312,12 +5312,12 @@ define <32 x i16> @and_v32i16(<32 x i16>
 
 define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; GENERIC-LABEL: andn_v32i16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandnps %zmm0, %zmm1, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: andn_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandnps %zmm0, %zmm1, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b2 = xor <32 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
@@ -5328,12 +5328,12 @@ define <32 x i16> @andn_v32i16(<32 x i16
 
 define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; GENERIC-LABEL: or_v32i16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: or_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = or <32 x i16> %a, %b
@@ -5342,12 +5342,12 @@ define <32 x i16> @or_v32i16(<32 x i16>
 
 define <32 x i16> @xor_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; GENERIC-LABEL: xor_v32i16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %zmm1, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: xor_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %zmm1, %zmm0, %zmm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = xor <32 x i16> %a, %b
@@ -5356,14 +5356,14 @@ define <32 x i16> @xor_v32i16(<32 x i16>
 
 define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
 ; GENERIC-LABEL: masked_and_v16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddps %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: masked_and_v16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    vaddps %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5381,14 +5381,14 @@ define <16 x float> @masked_and_v16f32(<
 
 define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
 ; GENERIC-LABEL: masked_or_v16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddps %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: masked_or_v16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    vaddps %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5406,14 +5406,14 @@ define <16 x float> @masked_or_v16f32(<1
 
 define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask, <16 x float> %c) {
 ; GENERIC-LABEL: masked_xor_v16f32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddps %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: masked_xor_v16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandps %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    vaddps %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5431,14 +5431,14 @@ define <16 x float> @masked_xor_v16f32(<
 
 define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
 ; GENERIC-LABEL: masked_and_v8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddpd %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: masked_and_v8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    vaddpd %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5456,14 +5456,14 @@ define <8 x double> @masked_and_v8f64(<8
 
 define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
 ; GENERIC-LABEL: masked_or_v8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddpd %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: masked_or_v8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    vaddpd %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5481,14 +5481,14 @@ define <8 x double> @masked_or_v8f64(<8
 
 define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %passThru, i8 %mask, <8 x double> %c) {
 ; GENERIC-LABEL: masked_xor_v8f64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddpd %zmm2, %zmm3, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: masked_xor_v8f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandpd %zmm1, %zmm0, %zmm2 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    vaddpd %zmm2, %zmm3, %zmm0 # sched: [4:0.33]
@@ -5506,13 +5506,13 @@ define <8 x double> @masked_xor_v8f64(<8
 
 define <8 x i64> @test_mm512_mask_and_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
 ; GENERIC-LABEL: test_mm512_mask_and_epi32:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_and_epi32:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5528,13 +5528,13 @@ entry:
 
 define <8 x i64> @test_mm512_mask_or_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
 ; GENERIC-LABEL: test_mm512_mask_or_epi32:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_or_epi32:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5550,13 +5550,13 @@ entry:
 
 define <8 x i64> @test_mm512_mask_xor_epi32(<8 x i64> %__src, i16 zeroext %__k, <8 x i64> %__a, <8 x i64> %__b) {
 ; GENERIC-LABEL: test_mm512_mask_xor_epi32:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_xor_epi32:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5572,13 +5572,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_xor_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_xor_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vxorpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_xor_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vxorpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5594,13 +5594,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_xor_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_xor_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_xor_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5616,13 +5616,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_xor_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_xor_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_xor_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vxorps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5638,13 +5638,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_xor_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_xor_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vxorps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_xor_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vxorps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5660,13 +5660,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_or_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_or_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vorpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_or_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vorpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5682,13 +5682,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_or_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_or_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vorpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_or_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vorpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5704,13 +5704,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_or_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_or_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vorps %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_or_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vorps %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5726,13 +5726,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_or_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_or_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vorps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_or_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vorps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5748,13 +5748,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_and_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_and_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_and_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandpd %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5770,13 +5770,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_and_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_and_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_and_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandpd %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5792,13 +5792,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_and_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_and_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandps %zmm1, %zmm2, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_and_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandps %zmm1, %zmm2, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5814,13 +5814,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_and_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_and_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_and_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandps %zmm0, %zmm1, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5836,13 +5836,13 @@ entry:
 
 define <8 x double> @test_mm512_mask_andnot_pd(<8 x double> %__W, i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_andnot_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandnpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_andnot_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandnpd %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5859,13 +5859,13 @@ entry:
 
 define <8 x double> @test_mm512_maskz_andnot_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_andnot_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_andnot_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5882,13 +5882,13 @@ entry:
 
 define <16 x float> @test_mm512_mask_andnot_ps(<16 x float> %__W, i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_mask_andnot_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandnps %zmm2, %zmm1, %zmm0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_mask_andnot_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandnps %zmm2, %zmm1, %zmm0 {%k1} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5905,13 +5905,13 @@ entry:
 
 define <16 x float> @test_mm512_maskz_andnot_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B) {
 ; GENERIC-LABEL: test_mm512_maskz_andnot_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    vandnps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_maskz_andnot_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vandnps %zmm1, %zmm0, %zmm0 {%k1} {z} # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -5928,12 +5928,12 @@ entry:
 
 define i32 @mov_test1(float %x) {
 ; GENERIC-LABEL: mov_test1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovd %xmm0, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovd %xmm0, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = bitcast float %x to i32
@@ -5942,12 +5942,12 @@ define i32 @mov_test1(float %x) {
 
 define <4 x i32> @mov_test2(i32 %x) {
 ; GENERIC-LABEL: mov_test2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovd %edi, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovd %edi, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = insertelement <4 x i32>undef, i32 %x, i32 0
@@ -5956,12 +5956,12 @@ define <4 x i32> @mov_test2(i32 %x) {
 
 define <2 x i64> @mov_test3(i64 %x) {
 ; GENERIC-LABEL: mov_test3:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovq %rdi, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test3:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovq %rdi, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = insertelement <2 x i64>undef, i64 %x, i32 0
@@ -5970,12 +5970,12 @@ define <2 x i64> @mov_test3(i64 %x) {
 
 define <4 x i32> @mov_test4(i32* %x) {
 ; GENERIC-LABEL: mov_test4:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test4:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %y = load i32, i32* %x
@@ -5985,12 +5985,12 @@ define <4 x i32> @mov_test4(i32* %x) {
 
 define void @mov_test5(float %x, float* %y) {
 ; GENERIC-LABEL: mov_test5:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovss %xmm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test5:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss %xmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    store float %x, float* %y, align 4
@@ -5999,12 +5999,12 @@ define void @mov_test5(float %x, float*
 
 define void @mov_test6(double %x, double* %y) {
 ; GENERIC-LABEL: mov_test6:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovsd %xmm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test6:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovsd %xmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    store double %x, double* %y, align 8
@@ -6013,12 +6013,12 @@ define void @mov_test6(double %x, double
 
 define float @mov_test7(i32* %x) {
 ; GENERIC-LABEL: mov_test7:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test7:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %y = load i32, i32* %x
@@ -6028,12 +6028,12 @@ define float @mov_test7(i32* %x) {
 
 define i32 @mov_test8(<4 x i32> %x) {
 ; GENERIC-LABEL: mov_test8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovd %xmm0, %eax # sched: [2:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovd %xmm0, %eax # sched: [2:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = extractelement <4 x i32> %x, i32 0
@@ -6042,12 +6042,12 @@ define i32 @mov_test8(<4 x i32> %x) {
 
 define i64 @mov_test9(<2 x i64> %x) {
 ; GENERIC-LABEL: mov_test9:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovq %xmm0, %rax # sched: [2:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test9:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovq %xmm0, %rax # sched: [2:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = extractelement <2 x i64> %x, i32 0
@@ -6056,12 +6056,12 @@ define i64 @mov_test9(<2 x i64> %x) {
 
 define <4 x i32> @mov_test10(i32* %x) {
 ; GENERIC-LABEL: mov_test10:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test10:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %y = load i32, i32* %x, align 4
@@ -6071,12 +6071,12 @@ define <4 x i32> @mov_test10(i32* %x) {
 
 define <4 x float> @mov_test11(float* %x) {
 ; GENERIC-LABEL: mov_test11:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test11:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %y = load float, float* %x, align 4
@@ -6086,12 +6086,12 @@ define <4 x float> @mov_test11(float* %x
 
 define <2 x double> @mov_test12(double* %x) {
 ; GENERIC-LABEL: mov_test12:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test12:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %y = load double, double* %x, align 8
@@ -6101,12 +6101,12 @@ define <2 x double> @mov_test12(double*
 
 define <2 x i64> @mov_test13(i64 %x) {
 ; GENERIC-LABEL: mov_test13:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovq %rdi, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test13:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovq %rdi, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = insertelement <2 x i64>zeroinitializer, i64 %x, i32 0
@@ -6115,12 +6115,12 @@ define <2 x i64> @mov_test13(i64 %x) {
 
 define <4 x i32> @mov_test14(i32 %x) {
 ; GENERIC-LABEL: mov_test14:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovd %edi, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test14:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovd %edi, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %res = insertelement <4 x i32>zeroinitializer, i32 %x, i32 0
@@ -6129,12 +6129,12 @@ define <4 x i32> @mov_test14(i32 %x) {
 
 define <4 x i32> @mov_test15(i32* %x) {
 ; GENERIC-LABEL: mov_test15:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test15:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
    %y = load i32, i32* %x, align 4
@@ -6144,12 +6144,12 @@ define <4 x i32> @mov_test15(i32* %x) {
 
 define <16 x i32> @mov_test16(i8 * %addr) {
 ; GENERIC-LABEL: mov_test16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <16 x i32>*
@@ -6159,12 +6159,12 @@ define <16 x i32> @mov_test16(i8 * %addr
 
 define <16 x i32> @mov_test17(i8 * %addr) {
 ; GENERIC-LABEL: mov_test17:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test17:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <16 x i32>*
@@ -6174,13 +6174,13 @@ define <16 x i32> @mov_test17(i8 * %addr
 
 define void @mov_test18(i8 * %addr, <8 x i64> %data) {
 ; GENERIC-LABEL: mov_test18:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test18:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6191,13 +6191,13 @@ define void @mov_test18(i8 * %addr, <8 x
 
 define void @mov_test19(i8 * %addr, <16 x i32> %data) {
 ; GENERIC-LABEL: mov_test19:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test19:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6208,13 +6208,13 @@ define void @mov_test19(i8 * %addr, <16
 
 define void @mov_test20(i8 * %addr, <16 x i32> %data) {
 ; GENERIC-LABEL: mov_test20:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test20:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6225,12 +6225,12 @@ define void @mov_test20(i8 * %addr, <16
 
 define  <8 x i64> @mov_test21(i8 * %addr) {
 ; GENERIC-LABEL: mov_test21:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test21:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <8 x i64>*
@@ -6240,13 +6240,13 @@ define  <8 x i64> @mov_test21(i8 * %addr
 
 define void @mov_test22(i8 * %addr, <8 x i64> %data) {
 ; GENERIC-LABEL: mov_test22:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test22:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6257,12 +6257,12 @@ define void @mov_test22(i8 * %addr, <8 x
 
 define <8 x i64> @mov_test23(i8 * %addr) {
 ; GENERIC-LABEL: mov_test23:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test23:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <8 x i64>*
@@ -6272,13 +6272,13 @@ define <8 x i64> @mov_test23(i8 * %addr)
 
 define void @mov_test24(i8 * %addr, <8 x double> %data) {
 ; GENERIC-LABEL: mov_test24:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test24:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6289,12 +6289,12 @@ define void @mov_test24(i8 * %addr, <8 x
 
 define <8 x double> @mov_test25(i8 * %addr) {
 ; GENERIC-LABEL: mov_test25:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test25:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <8 x double>*
@@ -6304,13 +6304,13 @@ define <8 x double> @mov_test25(i8 * %ad
 
 define void @mov_test26(i8 * %addr, <16 x float> %data) {
 ; GENERIC-LABEL: mov_test26:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test26:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6321,12 +6321,12 @@ define void @mov_test26(i8 * %addr, <16
 
 define <16 x float> @mov_test27(i8 * %addr) {
 ; GENERIC-LABEL: mov_test27:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test27:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <16 x float>*
@@ -6336,13 +6336,13 @@ define <16 x float> @mov_test27(i8 * %ad
 
 define void @mov_test28(i8 * %addr, <8 x double> %data) {
 ; GENERIC-LABEL: mov_test28:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test28:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6353,12 +6353,12 @@ define void @mov_test28(i8 * %addr, <8 x
 
 define <8 x double> @mov_test29(i8 * %addr) {
 ; GENERIC-LABEL: mov_test29:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test29:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <8 x double>*
@@ -6368,13 +6368,13 @@ define <8 x double> @mov_test29(i8 * %ad
 
 define void @mov_test30(i8 * %addr, <16 x float> %data) {
 ; GENERIC-LABEL: mov_test30:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test30:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -6385,12 +6385,12 @@ define void @mov_test30(i8 * %addr, <16
 
 define <16 x float> @mov_test31(i8 * %addr) {
 ; GENERIC-LABEL: mov_test31:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm0 # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test31:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %vaddr = bitcast i8* %addr to <16 x float>*
@@ -6400,14 +6400,14 @@ define <16 x float> @mov_test31(i8 * %ad
 
 define <16 x i32> @mov_test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; GENERIC-LABEL: mov_test32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6421,14 +6421,14 @@ define <16 x i32> @mov_test32(i8 * %addr
 
 define <16 x i32> @mov_test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; GENERIC-LABEL: mov_test33:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test33:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6442,14 +6442,14 @@ define <16 x i32> @mov_test33(i8 * %addr
 
 define <16 x i32> @mov_test34(i8 * %addr, <16 x i32> %mask1) {
 ; GENERIC-LABEL: mov_test34:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test34:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6463,14 +6463,14 @@ define <16 x i32> @mov_test34(i8 * %addr
 
 define <16 x i32> @mov_test35(i8 * %addr, <16 x i32> %mask1) {
 ; GENERIC-LABEL: mov_test35:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test35:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu32 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6484,14 +6484,14 @@ define <16 x i32> @mov_test35(i8 * %addr
 
 define <8 x i64> @mov_test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
 ; GENERIC-LABEL: mov_test36:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test36:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6505,14 +6505,14 @@ define <8 x i64> @mov_test36(i8 * %addr,
 
 define <8 x i64> @mov_test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
 ; GENERIC-LABEL: mov_test37:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test37:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6526,14 +6526,14 @@ define <8 x i64> @mov_test37(i8 * %addr,
 
 define <8 x i64> @mov_test38(i8 * %addr, <8 x i64> %mask1) {
 ; GENERIC-LABEL: mov_test38:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test38:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6547,14 +6547,14 @@ define <8 x i64> @mov_test38(i8 * %addr,
 
 define <8 x i64> @mov_test39(i8 * %addr, <8 x i64> %mask1) {
 ; GENERIC-LABEL: mov_test39:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test39:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqq %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6568,14 +6568,14 @@ define <8 x i64> @mov_test39(i8 * %addr,
 
 define <16 x float> @mov_test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
 ; GENERIC-LABEL: mov_test40:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovaps (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test40:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovaps (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6589,14 +6589,14 @@ define <16 x float> @mov_test40(i8 * %ad
 
 define <16 x float> @mov_test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
 ; GENERIC-LABEL: mov_test41:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test41:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqps %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovups (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6610,14 +6610,14 @@ define <16 x float> @mov_test41(i8 * %ad
 
 define <16 x float> @mov_test42(i8 * %addr, <16 x float> %mask1) {
 ; GENERIC-LABEL: mov_test42:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovaps (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test42:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovaps (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6631,14 +6631,14 @@ define <16 x float> @mov_test42(i8 * %ad
 
 define <16 x float> @mov_test43(i8 * %addr, <16 x float> %mask1) {
 ; GENERIC-LABEL: mov_test43:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test43:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqps %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovups (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6652,14 +6652,14 @@ define <16 x float> @mov_test43(i8 * %ad
 
 define <8 x double> @mov_test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
 ; GENERIC-LABEL: mov_test44:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovapd (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test44:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovapd (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6673,14 +6673,14 @@ define <8 x double> @mov_test44(i8 * %ad
 
 define <8 x double> @mov_test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
 ; GENERIC-LABEL: mov_test45:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovupd (%rdi), %zmm0 {%k1} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test45:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqpd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovupd (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -6694,14 +6694,14 @@ define <8 x double> @mov_test45(i8 * %ad
 
 define <8 x double> @mov_test46(i8 * %addr, <8 x double> %mask1) {
 ; GENERIC-LABEL: mov_test46:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovapd (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test46:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovapd (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6715,14 +6715,14 @@ define <8 x double> @mov_test46(i8 * %ad
 
 define <8 x double> @mov_test47(i8 * %addr, <8 x double> %mask1) {
 ; GENERIC-LABEL: mov_test47:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovupd (%rdi), %zmm0 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mov_test47:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpneq_oqpd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovupd (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -6736,7 +6736,7 @@ define <8 x double> @mov_test47(i8 * %ad
 
 define i16 @mask16(i16 %x) {
 ; GENERIC-LABEL: mask16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    knotw %k0, %k0
 ; GENERIC-NEXT:    kmovd %k0, %eax
@@ -6744,7 +6744,7 @@ define i16 @mask16(i16 %x) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mask16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    knotw %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
@@ -6758,14 +6758,14 @@ define i16 @mask16(i16 %x) {
 
 define i32 @mask16_zext(i16 %x) {
 ; GENERIC-LABEL: mask16_zext:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    knotw %k0, %k0
 ; GENERIC-NEXT:    kmovw %k0, %eax
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mask16_zext:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    knotw %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovw %k0, %eax # sched: [3:1.00]
@@ -6779,7 +6779,7 @@ define i32 @mask16_zext(i16 %x) {
 
 define i8 @mask8(i8 %x) {
 ; GENERIC-LABEL: mask8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    knotb %k0, %k0
 ; GENERIC-NEXT:    kmovd %k0, %eax
@@ -6787,7 +6787,7 @@ define i8 @mask8(i8 %x) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mask8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    knotb %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
@@ -6801,14 +6801,14 @@ define i8 @mask8(i8 %x) {
 
 define i32 @mask8_zext(i8 %x) {
 ; GENERIC-LABEL: mask8_zext:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    knotb %k0, %k0
 ; GENERIC-NEXT:    kmovb %k0, %eax
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mask8_zext:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    knotb %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovb %k0, %eax # sched: [3:1.00]
@@ -6822,14 +6822,14 @@ define i32 @mask8_zext(i8 %x) {
 
 define void @mask16_mem(i16* %ptr) {
 ; GENERIC-LABEL: mask16_mem:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovw (%rdi), %k0
 ; GENERIC-NEXT:    knotw %k0, %k0
 ; GENERIC-NEXT:    kmovw %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mask16_mem:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    knotw %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovw %k0, (%rdi) # sched: [1:1.00]
@@ -6844,14 +6844,14 @@ define void @mask16_mem(i16* %ptr) {
 
 define void @mask8_mem(i8* %ptr) {
 ; GENERIC-LABEL: mask8_mem:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovb (%rdi), %k0
 ; GENERIC-NEXT:    knotb %k0, %k0
 ; GENERIC-NEXT:    kmovb %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mask8_mem:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    knotb %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -6866,7 +6866,7 @@ define void @mask8_mem(i8* %ptr) {
 
 define i16 @mand16(i16 %x, i16 %y) {
 ; GENERIC-LABEL: mand16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movl %edi, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    xorl %esi, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    andl %esi, %edi # sched: [1:0.33]
@@ -6875,7 +6875,7 @@ define i16 @mand16(i16 %x, i16 %y) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mand16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; SKX-NEXT:    xorl %esi, %eax # sched: [1:0.25]
 ; SKX-NEXT:    andl %esi, %edi # sched: [1:0.25]
@@ -6893,7 +6893,7 @@ define i16 @mand16(i16 %x, i16 %y) {
 
 define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
 ; GENERIC-LABEL: mand16_mem:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovw (%rdi), %k0
 ; GENERIC-NEXT:    kmovw (%rsi), %k1
 ; GENERIC-NEXT:    kandw %k1, %k0, %k2
@@ -6904,7 +6904,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: mand16_mem:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    kmovw (%rsi), %k1 # sched: [7:1.00]
 ; SKX-NEXT:    kandw %k1, %k0, %k2 # sched: [1:1.00]
@@ -6924,7 +6924,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
 
 define i8 @shuf_test1(i16 %v) nounwind {
 ; GENERIC-LABEL: shuf_test1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kshiftrw $8, %k0, %k0
 ; GENERIC-NEXT:    kmovd %k0, %eax
@@ -6932,7 +6932,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: shuf_test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kshiftrw $8, %k0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kmovd %k0, %eax # sched: [3:1.00]
@@ -6946,7 +6946,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
 
 define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
 ; GENERIC-LABEL: zext_test1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; GENERIC-NEXT:    kshiftlw $10, %k0, %k0
 ; GENERIC-NEXT:    kshiftrw $15, %k0, %k0
@@ -6956,7 +6956,7 @@ define i32 @zext_test1(<16 x i32> %a, <1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftlw $10, %k0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0 # sched: [3:1.00]
@@ -6972,7 +6972,7 @@ define i32 @zext_test1(<16 x i32> %a, <1
 
 define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
 ; GENERIC-LABEL: zext_test2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; GENERIC-NEXT:    kshiftlw $10, %k0, %k0
 ; GENERIC-NEXT:    kshiftrw $15, %k0, %k0
@@ -6983,7 +6983,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_test2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftlw $10, %k0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0 # sched: [3:1.00]
@@ -7000,7 +7000,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
 
 define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
 ; GENERIC-LABEL: zext_test3:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0
 ; GENERIC-NEXT:    kshiftlw $10, %k0, %k0
 ; GENERIC-NEXT:    kshiftrw $15, %k0, %k0
@@ -7011,7 +7011,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: zext_test3:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpnleud %zmm1, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftlw $10, %k0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kshiftrw $15, %k0, %k0 # sched: [3:1.00]
@@ -7028,7 +7028,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
 
 define i8 @conv1(<8 x i1>* %R) {
 ; GENERIC-LABEL: conv1:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    kxnorw %k0, %k0, %k0
 ; GENERIC-NEXT:    kmovb %k0, (%rdi)
 ; GENERIC-NEXT:    movb $-2, -{{[0-9]+}}(%rsp) # sched: [5:1.00]
@@ -7036,7 +7036,7 @@ define i8 @conv1(<8 x i1>* %R) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: conv1:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    kxnorw %k0, %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovb %k0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    movb $-2, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
@@ -7054,7 +7054,7 @@ entry:
 
 define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
 ; GENERIC-LABEL: test4:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpgtq %ymm1, %ymm0, %k0
 ; GENERIC-NEXT:    vpcmpgtq %ymm3, %ymm2, %k1
 ; GENERIC-NEXT:    kandnw %k0, %k1, %k0
@@ -7063,7 +7063,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test4:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpgtq %ymm1, %ymm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpcmpgtq %ymm3, %ymm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    kandnw %k0, %k1, %k0 # sched: [1:1.00]
@@ -7079,7 +7079,7 @@ define <4 x i32> @test4(<4 x i64> %x, <4
 
 define <2 x i64> @vcmp_test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
 ; GENERIC-LABEL: vcmp_test5:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpcmpgtq %xmm0, %xmm1, %k0
 ; GENERIC-NEXT:    vpcmpgtq %xmm3, %xmm2, %k1
 ; GENERIC-NEXT:    kandnw %k1, %k0, %k0
@@ -7087,7 +7087,7 @@ define <2 x i64> @vcmp_test5(<2 x i64> %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vcmp_test5:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpcmpgtq %xmm0, %xmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpcmpgtq %xmm3, %xmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    kandnw %k1, %k0, %k0 # sched: [1:1.00]
@@ -7113,7 +7113,7 @@ false:
 }
 define void @vcmp_test7(<8 x i1> %mask)  {
 ; GENERIC-LABEL: vcmp_test7:
-; GENERIC:       # BB#0: # %allocas
+; GENERIC:       # %bb.0: # %allocas
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k0
 ; GENERIC-NEXT:    movb $85, %al # sched: [1:0.33]
@@ -7123,7 +7123,7 @@ define void @vcmp_test7(<8 x i1> %mask)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vcmp_test7:
-; SKX:       # BB#0: # %allocas
+; SKX:       # %bb.0: # %allocas
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    movb $85, %al # sched: [1:0.25]
@@ -7145,11 +7145,11 @@ false:
 }
 define <16 x i8> @vcmp_test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
 ; GENERIC-LABEL: vcmp_test8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    cmpl %esi, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    jg .LBB386_1 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#2:
+; GENERIC-NEXT:  # %bb.2:
 ; GENERIC-NEXT:    vpcmpltud %zmm2, %zmm1, %k0
 ; GENERIC-NEXT:    vpmovm2b %k0, %xmm0
 ; GENERIC-NEXT:    vzeroupper
@@ -7161,11 +7161,11 @@ define <16 x i8> @vcmp_test8(<16 x i32>%
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vcmp_test8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    cmpl %esi, %edi # sched: [1:0.25]
 ; SKX-NEXT:    jg .LBB386_1 # sched: [1:0.50]
-; SKX-NEXT:  # BB#2:
+; SKX-NEXT:  # %bb.2:
 ; SKX-NEXT:    vpcmpltud %zmm2, %zmm1, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2b %k0, %xmm0
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -7184,10 +7184,10 @@ define <16 x i8> @vcmp_test8(<16 x i32>%
 }
 define <16 x i1> @vpmov_test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
 ; GENERIC-LABEL: vpmov_test9:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cmpl %esi, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    jg .LBB387_1 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#2:
+; GENERIC-NEXT:  # %bb.2:
 ; GENERIC-NEXT:    vpsllw $7, %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    jmp .LBB387_3 # sched: [1:1.00]
 ; GENERIC-NEXT:  .LBB387_1:
@@ -7198,10 +7198,10 @@ define <16 x i1> @vpmov_test9(<16 x i1>%
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vpmov_test9:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi # sched: [1:0.25]
 ; SKX-NEXT:    jg .LBB387_1 # sched: [1:0.50]
-; SKX-NEXT:  # BB#2:
+; SKX-NEXT:  # %bb.2:
 ; SKX-NEXT:    vpsllw $7, %xmm1, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    jmp .LBB387_3 # sched: [1:0.50]
 ; SKX-NEXT:  .LBB387_1:
@@ -7221,10 +7221,10 @@ define <16 x i1> @vpmov_test9(<16 x i1>%
 
 define <4 x i1> @vmov_test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
 ; GENERIC-LABEL: vmov_test11:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cmpl %esi, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    jg .LBB389_1 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#2:
+; GENERIC-NEXT:  # %bb.2:
 ; GENERIC-NEXT:    vpslld $31, %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    jmp .LBB389_3 # sched: [1:1.00]
 ; GENERIC-NEXT:  .LBB389_1:
@@ -7235,10 +7235,10 @@ define <4 x i1> @vmov_test11(<4 x i1>%a,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test11:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi # sched: [1:0.25]
 ; SKX-NEXT:    jg .LBB389_1 # sched: [1:0.50]
-; SKX-NEXT:  # BB#2:
+; SKX-NEXT:  # %bb.2:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    jmp .LBB389_3 # sched: [1:0.50]
 ; SKX-NEXT:  .LBB389_1:
@@ -7254,12 +7254,12 @@ define <4 x i1> @vmov_test11(<4 x i1>%a,
 
 define i32 @vmov_test12(i32 %x, i32 %y)  {
 ; GENERIC-LABEL: vmov_test12:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movl %edi, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test12:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl %edi, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a = bitcast i16 21845 to <16 x i1>
@@ -7270,12 +7270,12 @@ define i32 @vmov_test12(i32 %x, i32 %y)
 
 define i32 @vmov_test13(i32 %x, i32 %y)  {
 ; GENERIC-LABEL: vmov_test13:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movl %esi, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test13:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl %esi, %eax # sched: [1:0.25]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a = bitcast i16 21845 to <16 x i1>
@@ -7291,7 +7291,7 @@ define i32 @vmov_test13(i32 %x, i32 %y)
 
 define <16 x i1> @vmov_test15(i32 %x, i32 %y)  {
 ; GENERIC-LABEL: vmov_test15:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    cmpl %esi, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    movw $21845, %ax # imm = 0x5555
 ; GENERIC-NEXT:    # sched: [1:0.33]
@@ -7302,7 +7302,7 @@ define <16 x i1> @vmov_test15(i32 %x, i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test15:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    cmpl %esi, %edi # sched: [1:0.25]
 ; SKX-NEXT:    movw $21845, %ax # imm = 0x5555
 ; SKX-NEXT:    # sched: [1:0.25]
@@ -7321,7 +7321,7 @@ define <16 x i1> @vmov_test15(i32 %x, i3
 define <64 x i8> @vmov_test16(i64 %x) {
 ;
 ; GENERIC-LABEL: vmov_test16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovq %rdi, %k0
 ; GENERIC-NEXT:    movb $1, %al # sched: [1:0.33]
 ; GENERIC-NEXT:    kmovd %eax, %k1
@@ -7337,7 +7337,7 @@ define <64 x i8> @vmov_test16(i64 %x) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovq %rdi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    movb $1, %al # sched: [1:0.25]
 ; SKX-NEXT:    kmovd %eax, %k1 # sched: [1:1.00]
@@ -7360,7 +7360,7 @@ define <64 x i8> @vmov_test16(i64 %x) {
 define <64 x i8> @vmov_test17(i64 %x, i32 %y, i32 %z) {
 ;
 ; GENERIC-LABEL: vmov_test17:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovq %rdi, %k0
 ; GENERIC-NEXT:    cmpl %edx, %esi # sched: [1:0.33]
 ; GENERIC-NEXT:    setg %al # sched: [1:0.50]
@@ -7377,7 +7377,7 @@ define <64 x i8> @vmov_test17(i64 %x, i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test17:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovq %rdi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    cmpl %edx, %esi # sched: [1:0.25]
 ; SKX-NEXT:    setg %al # sched: [1:0.50]
@@ -7401,7 +7401,7 @@ define <64 x i8> @vmov_test17(i64 %x, i3
 
 define <8 x i1> @vmov_test18(i8 %a, i16 %y) {
 ; GENERIC-LABEL: vmov_test18:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k1
 ; GENERIC-NEXT:    kmovd %esi, %k2
 ; GENERIC-NEXT:    kshiftlw $7, %k2, %k0
@@ -7422,7 +7422,7 @@ define <8 x i1> @vmov_test18(i8 %a, i16
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test18:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k2 # sched: [1:1.00]
 ; SKX-NEXT:    kshiftlw $7, %k2, %k0 # sched: [3:1.00]
@@ -7451,14 +7451,14 @@ define <8 x i1> @vmov_test18(i8 %a, i16
 }
 define <32 x i16> @vmov_test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
 ; GENERIC-LABEL: vmov_test21:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %ymm1, %ymm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %ymm1, %k1
 ; GENERIC-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test21:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm1, %ymm1 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %ymm1, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -7469,14 +7469,14 @@ define <32 x i16> @vmov_test21(<32 x i16
 
 define void @vmov_test22(<4 x i1> %a, <4 x i1>* %addr) {
 ; GENERIC-LABEL: vmov_test22:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k0 # sched: [1:1.00]
 ; GENERIC-NEXT:    kmovb %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test22:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7487,14 +7487,14 @@ define void @vmov_test22(<4 x i1> %a, <4
 
 define void @vmov_test23(<2 x i1> %a, <2 x i1>* %addr) {
 ; GENERIC-LABEL: vmov_test23:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k0 # sched: [1:1.00]
 ; GENERIC-NEXT:    kmovb %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: vmov_test23:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7505,7 +7505,7 @@ define void @vmov_test23(<2 x i1> %a, <2
 
 define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
 ; GENERIC-LABEL: store_v1i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kxnorw %k0, %k0, %k1
 ; GENERIC-NEXT:    kxorw %k1, %k0, %k0
@@ -7513,7 +7513,7 @@ define void @store_v1i1(<1 x i1> %c , <1
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_v1i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kxorw %k1, %k0, %k0 # sched: [1:1.00]
@@ -7526,7 +7526,7 @@ define void @store_v1i1(<1 x i1> %c , <1
 
 define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
 ; GENERIC-LABEL: store_v2i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmq %xmm0, %xmm0, %k0 # sched: [1:1.00]
 ; GENERIC-NEXT:    knotw %k0, %k0
@@ -7534,7 +7534,7 @@ define void @store_v2i1(<2 x i1> %c , <2
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_v2i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    knotw %k0, %k0 # sched: [1:1.00]
@@ -7547,7 +7547,7 @@ define void @store_v2i1(<2 x i1> %c , <2
 
 define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
 ; GENERIC-LABEL: store_v4i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vptestmd %xmm0, %xmm0, %k0 # sched: [1:1.00]
 ; GENERIC-NEXT:    knotw %k0, %k0
@@ -7555,7 +7555,7 @@ define void @store_v4i1(<4 x i1> %c , <4
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_v4i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vptestmd %xmm0, %xmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    knotw %k0, %k0 # sched: [1:1.00]
@@ -7568,7 +7568,7 @@ define void @store_v4i1(<4 x i1> %c , <4
 
 define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
 ; GENERIC-LABEL: store_v8i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k0
 ; GENERIC-NEXT:    knotb %k0, %k0
@@ -7576,7 +7576,7 @@ define void @store_v8i1(<8 x i1> %c , <8
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_v8i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    knotb %k0, %k0 # sched: [1:1.00]
@@ -7589,7 +7589,7 @@ define void @store_v8i1(<8 x i1> %c , <8
 
 define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
 ; GENERIC-LABEL: store_v16i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k0
 ; GENERIC-NEXT:    knotw %k0, %k0
@@ -7597,7 +7597,7 @@ define void @store_v16i1(<16 x i1> %c ,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_v16i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    knotw %k0, %k0 # sched: [1:1.00]
@@ -7623,14 +7623,14 @@ define void @store_v16i1(<16 x i1> %c ,
 
 define void @f1(i32 %c) {
 ; GENERIC-LABEL: f1:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    movzbl {{.*}}(%rip), %edi # sched: [5:0.50]
 ; GENERIC-NEXT:    xorl $1, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    movb %dil, {{.*}}(%rip) # sched: [5:1.00]
 ; GENERIC-NEXT:    jmp f2 # TAILCALL
 ;
 ; SKX-LABEL: f1:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    movzbl {{.*}}(%rip), %edi # sched: [5:0.50]
 ; SKX-NEXT:    xorl $1, %edi # sched: [1:0.25]
 ; SKX-NEXT:    movb %dil, {{.*}}(%rip) # sched: [1:1.00]
@@ -7648,13 +7648,13 @@ declare void @f2(i32) #1
 
 define void @store_i16_i1(i16 %x, i1 *%y) {
 ; GENERIC-LABEL: store_i16_i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    andl $1, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    movb %dil, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_i16_i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    andl $1, %edi # sched: [1:0.25]
 ; SKX-NEXT:    movb %dil, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7665,13 +7665,13 @@ define void @store_i16_i1(i16 %x, i1 *%y
 
 define void @store_i8_i1(i8 %x, i1 *%y) {
 ; GENERIC-LABEL: store_i8_i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    andl $1, %edi # sched: [1:0.33]
 ; GENERIC-NEXT:    movb %dil, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_i8_i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    andl $1, %edi # sched: [1:0.25]
 ; SKX-NEXT:    movb %dil, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7682,7 +7682,7 @@ define void @store_i8_i1(i8 %x, i1 *%y)
 
 define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
 ; GENERIC-LABEL: test_build_vec_v32i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    movl $1497715861, %eax # imm = 0x59455495
 ; GENERIC-NEXT:    # sched: [1:0.33]
 ; GENERIC-NEXT:    kmovd %eax, %k1
@@ -7690,7 +7690,7 @@ define <32 x i16> @test_build_vec_v32i1(
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_build_vec_v32i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl $1497715861, %eax # imm = 0x59455495
 ; SKX-NEXT:    # sched: [1:0.25]
 ; SKX-NEXT:    kmovd %eax, %k1 # sched: [1:1.00]
@@ -7702,12 +7702,12 @@ define <32 x i16> @test_build_vec_v32i1(
 
 define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
 ; GENERIC-LABEL: test_build_vec_v64i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_build_vec_v64i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zmm0[2],zero,zero,zero,zmm0[6],zero,zmm0[8],zero,zmm0[10],zero,zmm0[12],zero,zero,zmm0[15],zero,zero,zmm0[18],zero,zmm0[20],zero,zmm0[22],zero,zmm0[24],zero,zero,zmm0[27],zero,zero,zmm0[30],zero,zmm0[32],zero,zmm0[34],zero,zero,zero,zmm0[38],zero,zmm0[40],zero,zero,zmm0[43,44],zero,zmm0[46],zero,zmm0[48],zero,zmm0[50],zero,zero,zero,zmm0[54],zero,zmm0[56],zero,zero,zmm0[59,60],zero,zmm0[62],zero sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %ret = select <64 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false>, <64 x i8> %x, <64 x i8> zeroinitializer
@@ -7716,14 +7716,14 @@ define <64 x i8> @test_build_vec_v64i1(<
 
 define void @ktest_1(<8 x double> %in, double * %base) {
 ; GENERIC-LABEL: ktest_1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovupd (%rdi), %zmm1 # sched: [4:0.50]
 ; GENERIC-NEXT:    vcmpltpd %zmm0, %zmm1, %k1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovupd 8(%rdi), %zmm1 {%k1} {z} # sched: [4:0.50]
 ; GENERIC-NEXT:    vcmpltpd %zmm1, %zmm0, %k0 {%k1} # sched: [3:1.00]
 ; GENERIC-NEXT:    ktestb %k0, %k0
 ; GENERIC-NEXT:    je .LBB410_2 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#1: # %L1
+; GENERIC-NEXT:  # %bb.1: # %L1
 ; GENERIC-NEXT:    vmovapd %zmm0, (%rdi)
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -7733,14 +7733,14 @@ define void @ktest_1(<8 x double> %in, d
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ktest_1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovupd (%rdi), %zmm1 # sched: [8:0.50]
 ; SKX-NEXT:    vcmpltpd %zmm0, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vmovupd 8(%rdi), %zmm1 {%k1} {z} # sched: [8:0.50]
 ; SKX-NEXT:    vcmpltpd %zmm1, %zmm0, %k0 {%k1} # sched: [3:1.00]
 ; SKX-NEXT:    ktestb %k0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    je .LBB410_2 # sched: [1:0.50]
-; SKX-NEXT:  # BB#1: # %L1
+; SKX-NEXT:  # %bb.1: # %L1
 ; SKX-NEXT:    vmovapd %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7778,7 +7778,7 @@ End:
 define void @ktest_2(<32 x float> %in, float * %base) {
 ;
 ; GENERIC-LABEL: ktest_2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups (%rdi), %zmm2 # sched: [4:0.50]
 ; GENERIC-NEXT:    vmovups 64(%rdi), %zmm3 # sched: [4:0.50]
 ; GENERIC-NEXT:    vcmpltps %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -7792,7 +7792,7 @@ define void @ktest_2(<32 x float> %in, f
 ; GENERIC-NEXT:    kord %k1, %k0, %k0
 ; GENERIC-NEXT:    ktestd %k0, %k0
 ; GENERIC-NEXT:    je .LBB411_2 # sched: [1:1.00]
-; GENERIC-NEXT:  # BB#1: # %L1
+; GENERIC-NEXT:  # %bb.1: # %L1
 ; GENERIC-NEXT:    vmovaps %zmm0, (%rdi)
 ; GENERIC-NEXT:    vmovaps %zmm1, 64(%rdi)
 ; GENERIC-NEXT:    vzeroupper
@@ -7804,7 +7804,7 @@ define void @ktest_2(<32 x float> %in, f
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: ktest_2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm2 # sched: [8:0.50]
 ; SKX-NEXT:    vmovups 64(%rdi), %zmm3 # sched: [8:0.50]
 ; SKX-NEXT:    vcmpltps %zmm0, %zmm2, %k1 # sched: [3:1.00]
@@ -7818,7 +7818,7 @@ define void @ktest_2(<32 x float> %in, f
 ; SKX-NEXT:    kord %k1, %k0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    ktestd %k0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    je .LBB411_2 # sched: [1:0.50]
-; SKX-NEXT:  # BB#1: # %L1
+; SKX-NEXT:  # %bb.1: # %L1
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vmovaps %zmm1, 64(%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
@@ -7857,13 +7857,13 @@ End:
 
 define <8 x i64> @load_8i1(<8 x i1>* %a) {
 ; GENERIC-LABEL: load_8i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovb (%rdi), %k0
 ; GENERIC-NEXT:    vpmovm2q %k0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: load_8i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovm2q %k0, %zmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7874,13 +7874,13 @@ define <8 x i64> @load_8i1(<8 x i1>* %a)
 
 define <16 x i32> @load_16i1(<16 x i1>* %a) {
 ; GENERIC-LABEL: load_16i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovw (%rdi), %k0
 ; GENERIC-NEXT:    vpmovm2d %k0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: load_16i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %zmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7891,13 +7891,13 @@ define <16 x i32> @load_16i1(<16 x i1>*
 
 define <2 x i16> @load_2i1(<2 x i1>* %a) {
 ; GENERIC-LABEL: load_2i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovb (%rdi), %k0
 ; GENERIC-NEXT:    vpmovm2q %k0, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: load_2i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovm2q %k0, %xmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7908,13 +7908,13 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
 
 define <4 x i16> @load_4i1(<4 x i1>* %a) {
 ; GENERIC-LABEL: load_4i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovb (%rdi), %k0
 ; GENERIC-NEXT:    vpmovm2d %k0, %xmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: load_4i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovb (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %xmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7925,13 +7925,13 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
 
 define <32 x i16> @load_32i1(<32 x i1>* %a) {
 ; GENERIC-LABEL: load_32i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd (%rdi), %k0
 ; GENERIC-NEXT:    vpmovm2w %k0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: load_32i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovm2w %k0, %zmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7942,13 +7942,13 @@ define <32 x i16> @load_32i1(<32 x i1>*
 
 define <64 x i8> @load_64i1(<64 x i1>* %a) {
 ; GENERIC-LABEL: load_64i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovq (%rdi), %k0
 ; GENERIC-NEXT:    vpmovm2b %k0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: load_64i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovq (%rdi), %k0 # sched: [7:1.00]
 ; SKX-NEXT:    vpmovm2b %k0, %zmm0
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -7959,14 +7959,14 @@ define <64 x i8> @load_64i1(<64 x i1>* %
 
 define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
 ; GENERIC-LABEL: store_8i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k0
 ; GENERIC-NEXT:    kmovb %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_8i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7977,14 +7977,14 @@ define void @store_8i1(<8 x i1>* %a, <8
 
 define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
 ; GENERIC-LABEL: store_8i1_1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovw2m %xmm0, %k0
 ; GENERIC-NEXT:    kmovb %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_8i1_1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovb %k0, (%rdi) # sched: [1:1.00]
@@ -7996,14 +7996,14 @@ define void @store_8i1_1(<8 x i1>* %a, <
 
 define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
 ; GENERIC-LABEL: store_16i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %xmm0, %k0
 ; GENERIC-NEXT:    kmovw %k0, (%rdi)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_16i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %xmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovw %k0, (%rdi) # sched: [1:1.00]
@@ -8014,7 +8014,7 @@ define void @store_16i1(<16 x i1>* %a, <
 
 define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
 ; GENERIC-LABEL: store_32i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpmovb2m %ymm0, %k0
 ; GENERIC-NEXT:    kmovd %k0, (%rdi)
@@ -8022,7 +8022,7 @@ define void @store_32i1(<32 x i1>* %a, <
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_32i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %ymm0, %ymm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %ymm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %k0, (%rdi) # sched: [1:1.00]
@@ -8034,7 +8034,7 @@ define void @store_32i1(<32 x i1>* %a, <
 
 define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
 ; GENERIC-LABEL: store_32i1_1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $15, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovw2m %zmm0, %k0
 ; GENERIC-NEXT:    kmovd %k0, (%rdi)
@@ -8042,7 +8042,7 @@ define void @store_32i1_1(<32 x i1>* %a,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_32i1_1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $15, %zmm0, %zmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovw2m %zmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %k0, (%rdi) # sched: [1:1.00]
@@ -8057,7 +8057,7 @@ define void @store_32i1_1(<32 x i1>* %a,
 define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
 ;
 ; GENERIC-LABEL: store_64i1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpsllw $7, %zmm0, %zmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovb2m %zmm0, %k0
 ; GENERIC-NEXT:    kmovq %k0, (%rdi)
@@ -8065,7 +8065,7 @@ define void @store_64i1(<64 x i1>* %a, <
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: store_64i1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllw $7, %zmm0, %zmm0 # sched: [1:0.50]
 ; SKX-NEXT:    vpmovb2m %zmm0, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovq %k0, (%rdi) # sched: [1:1.00]
@@ -8077,7 +8077,7 @@ define void @store_64i1(<64 x i1>* %a, <
 
 define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
 ; GENERIC-LABEL: test_bitcast_v8i1_zext:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; GENERIC-NEXT:    kmovb %k0, %eax
@@ -8086,7 +8086,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_bitcast_v8i1_zext:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kmovb %k0, %eax # sched: [3:1.00]
@@ -8103,7 +8103,7 @@ define i32 @test_bitcast_v8i1_zext(<16 x
 
 define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
 ; GENERIC-LABEL: test_bitcast_v16i1_zext:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
 ; GENERIC-NEXT:    kmovw %k0, %eax
@@ -8112,7 +8112,7 @@ define i32 @test_bitcast_v16i1_zext(<16
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_bitcast_v16i1_zext:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    kmovw %k0, %eax # sched: [3:1.00]
@@ -8128,7 +8128,7 @@ define i32 @test_bitcast_v16i1_zext(<16
 
 define i16 @test_v16i1_add(i16 %x, i16 %y) {
 ; GENERIC-LABEL: test_v16i1_add:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kmovd %esi, %k1
 ; GENERIC-NEXT:    kxorw %k1, %k0, %k0
@@ -8137,7 +8137,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_v16i1_add:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kxorw %k1, %k0, %k0 # sched: [1:1.00]
@@ -8153,7 +8153,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
 
 define i16 @test_v16i1_sub(i16 %x, i16 %y) {
 ; GENERIC-LABEL: test_v16i1_sub:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kmovd %esi, %k1
 ; GENERIC-NEXT:    kxorw %k1, %k0, %k0
@@ -8162,7 +8162,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_v16i1_sub:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kxorw %k1, %k0, %k0 # sched: [1:1.00]
@@ -8178,7 +8178,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
 
 define i16 @test_v16i1_mul(i16 %x, i16 %y) {
 ; GENERIC-LABEL: test_v16i1_mul:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kmovd %esi, %k1
 ; GENERIC-NEXT:    kandw %k1, %k0, %k0
@@ -8187,7 +8187,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_v16i1_mul:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kandw %k1, %k0, %k0 # sched: [1:1.00]
@@ -8203,7 +8203,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
 
 define i8 @test_v8i1_add(i8 %x, i8 %y) {
 ; GENERIC-LABEL: test_v8i1_add:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kmovd %esi, %k1
 ; GENERIC-NEXT:    kxorb %k1, %k0, %k0
@@ -8212,7 +8212,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_v8i1_add:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kxorb %k1, %k0, %k0 # sched: [1:1.00]
@@ -8228,7 +8228,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
 
 define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 ; GENERIC-LABEL: test_v8i1_sub:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kmovd %esi, %k1
 ; GENERIC-NEXT:    kxorb %k1, %k0, %k0
@@ -8237,7 +8237,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_v8i1_sub:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kxorb %k1, %k0, %k0 # sched: [1:1.00]
@@ -8253,7 +8253,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
 
 define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 ; GENERIC-LABEL: test_v8i1_mul:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    kmovd %edi, %k0
 ; GENERIC-NEXT:    kmovd %esi, %k1
 ; GENERIC-NEXT:    kandb %k1, %k0, %k0
@@ -8262,7 +8262,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_v8i1_mul:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovd %edi, %k0 # sched: [1:1.00]
 ; SKX-NEXT:    kmovd %esi, %k1 # sched: [1:1.00]
 ; SKX-NEXT:    kandb %k1, %k0, %k0 # sched: [1:1.00]
@@ -8278,12 +8278,12 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
 
 define   <16 x i32> @_inreg16xi32(i32 %a) {
 ; GENERIC-LABEL: _inreg16xi32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpbroadcastd %edi, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _inreg16xi32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpbroadcastd %edi, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = insertelement <16 x i32> undef, i32 %a, i32 0
@@ -8293,12 +8293,12 @@ define   <16 x i32> @_inreg16xi32(i32 %a
 
 define   <8 x i64> @_inreg8xi64(i64 %a) {
 ; GENERIC-LABEL: _inreg8xi64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpbroadcastq %rdi, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _inreg8xi64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpbroadcastq %rdi, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = insertelement <8 x i64> undef, i64 %a, i32 0
@@ -8308,12 +8308,12 @@ define   <8 x i64> @_inreg8xi64(i64 %a)
 
 define   <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
 ; GENERIC-LABEL: _ss16xfloat_v4:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _ss16xfloat_v4:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
@@ -8322,12 +8322,12 @@ define   <16 x float> @_ss16xfloat_v4(<4
 
 define   <16 x float> @_inreg16xfloat(float %a) {
 ; GENERIC-LABEL: _inreg16xfloat:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _inreg16xfloat:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = insertelement <16 x float> undef, float %a, i32 0
@@ -8337,7 +8337,7 @@ define   <16 x float> @_inreg16xfloat(fl
 
 define   <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
 ; GENERIC-LABEL: _ss16xfloat_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
@@ -8345,7 +8345,7 @@ define   <16 x float> @_ss16xfloat_mask(
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _ss16xfloat_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1} # sched: [3:1.00]
@@ -8360,14 +8360,14 @@ define   <16 x float> @_ss16xfloat_mask(
 
 define   <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
 ; GENERIC-LABEL: _ss16xfloat_maskz:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _ss16xfloat_maskz:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -8381,12 +8381,12 @@ define   <16 x float> @_ss16xfloat_maskz
 
 define   <16 x float> @_ss16xfloat_load(float* %a.ptr) {
 ; GENERIC-LABEL: _ss16xfloat_load:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss (%rdi), %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _ss16xfloat_load:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a = load float, float* %a.ptr
@@ -8397,14 +8397,14 @@ define   <16 x float> @_ss16xfloat_load(
 
 define   <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
 ; GENERIC-LABEL: _ss16xfloat_mask_load:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
 ; GENERIC-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _ss16xfloat_mask_load:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -8419,14 +8419,14 @@ define   <16 x float> @_ss16xfloat_mask_
 
 define   <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
 ; GENERIC-LABEL: _ss16xfloat_maskz_load:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
 ; GENERIC-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _ss16xfloat_maskz_load:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -8441,12 +8441,12 @@ define   <16 x float> @_ss16xfloat_maskz
 
 define   <8 x double> @_inreg8xdouble(double %a) {
 ; GENERIC-LABEL: _inreg8xdouble:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _inreg8xdouble:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = insertelement <8 x double> undef, double %a, i32 0
@@ -8456,7 +8456,7 @@ define   <8 x double> @_inreg8xdouble(do
 
 define   <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
 ; GENERIC-LABEL: _sd8xdouble_mask:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -8464,7 +8464,7 @@ define   <8 x double> @_sd8xdouble_mask(
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _sd8xdouble_mask:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm3, %xmm3, %xmm3 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1} # sched: [3:1.00]
@@ -8479,14 +8479,14 @@ define   <8 x double> @_sd8xdouble_mask(
 
 define   <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
 ; GENERIC-LABEL: _sd8xdouble_maskz:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _sd8xdouble_maskz:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z} # sched: [3:1.00]
@@ -8500,12 +8500,12 @@ define   <8 x double> @_sd8xdouble_maskz
 
 define   <8 x double> @_sd8xdouble_load(double* %a.ptr) {
 ; GENERIC-LABEL: _sd8xdouble_load:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastsd (%rdi), %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _sd8xdouble_load:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastsd (%rdi), %zmm0 # sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %a = load double, double* %a.ptr
@@ -8516,14 +8516,14 @@ define   <8 x double> @_sd8xdouble_load(
 
 define   <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
 ; GENERIC-LABEL: _sd8xdouble_mask_load:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1
 ; GENERIC-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _sd8xdouble_mask_load:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1} # sched: [8:0.50]
@@ -8538,14 +8538,14 @@ define   <8 x double> @_sd8xdouble_mask_
 
 define   <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
 ; GENERIC-LABEL: _sd8xdouble_maskz_load:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT:    vpcmpneqd %ymm1, %ymm0, %k1
 ; GENERIC-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1} {z}
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _sd8xdouble_maskz_load:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT:    vpcmpneqd %ymm1, %ymm0, %k1 # sched: [3:1.00]
 ; SKX-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1} {z} # sched: [8:0.50]
@@ -8560,12 +8560,12 @@ define   <8 x double> @_sd8xdouble_maskz
 
 define   <16 x i32> @_xmm16xi32(<16 x i32> %a) {
 ; GENERIC-LABEL: _xmm16xi32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _xmm16xi32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
@@ -8574,12 +8574,12 @@ define   <16 x i32> @_xmm16xi32(<16 x i3
 
 define   <16 x float> @_xmm16xfloat(<16 x float> %a) {
 ; GENERIC-LABEL: _xmm16xfloat:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _xmm16xfloat:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
@@ -8588,7 +8588,7 @@ define   <16 x float> @_xmm16xfloat(<16
 
 define <16 x i32> @test_vbroadcast() {
 ; GENERIC-LABEL: test_vbroadcast:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vcmpunordps %zmm0, %zmm0, %k0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vpmovm2d %k0, %zmm0
@@ -8597,7 +8597,7 @@ define <16 x i32> @test_vbroadcast() {
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_vbroadcast:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT:    vcmpunordps %zmm0, %zmm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %zmm0
@@ -8616,12 +8616,12 @@ entry:
 ; IR generated will produce broadcasts at the end.
 define <8 x double> @test_set1_pd(double %d) #2 {
 ; GENERIC-LABEL: test_set1_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_set1_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -8638,12 +8638,12 @@ entry:
 
 define <8 x i64> @test_set1_epi64(i64 %d) #2 {
 ; GENERIC-LABEL: test_set1_epi64:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpbroadcastq %rdi, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_set1_epi64:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpbroadcastq %rdi, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -8660,12 +8660,12 @@ entry:
 
 define <16 x float> @test_set1_ps(float %f) #2 {
 ; GENERIC-LABEL: test_set1_ps:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_set1_ps:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -8690,12 +8690,12 @@ entry:
 
 define <16 x i32> @test_set1_epi32(i32 %f) #2 {
 ; GENERIC-LABEL: test_set1_epi32:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vpbroadcastd %edi, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_set1_epi32:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vpbroadcastd %edi, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -8722,12 +8722,12 @@ entry:
 ; Verify that the IR generated will produce the broadcast at the end.
 define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
 ; GENERIC-LABEL: test_mm512_broadcastsd_pd:
-; GENERIC:       # BB#0: # %entry
+; GENERIC:       # %bb.0: # %entry
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: test_mm512_broadcastsd_pd:
-; SKX:       # BB#0: # %entry
+; SKX:       # %bb.0: # %entry
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 entry:
@@ -8745,12 +8745,12 @@ entry:
 
 define <16 x float> @suff_test1(<8 x float>%a)  {
 ; GENERIC-LABEL: suff_test1:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: suff_test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
@@ -8759,12 +8759,12 @@ define <16 x float> @suff_test1(<8 x flo
 
 define <8 x double> @suff_test2(<4 x double>%a)  {
 ; GENERIC-LABEL: suff_test2:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: suff_test2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
@@ -8773,12 +8773,12 @@ define <8 x double> @suff_test2(<4 x dou
 
 define <64 x i8> @_invec32xi8(<32 x i8>%a)  {
 ; GENERIC-LABEL: _invec32xi8:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpbroadcastb %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _invec32xi8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpbroadcastb %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
@@ -8787,12 +8787,12 @@ define <64 x i8> @_invec32xi8(<32 x i8>%
 
 define <32 x i16> @_invec16xi16(<16 x i16>%a)  {
 ; GENERIC-LABEL: _invec16xi16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpbroadcastw %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _invec16xi16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpbroadcastw %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
@@ -8801,12 +8801,12 @@ define <32 x i16> @_invec16xi16(<16 x i1
 
 define <16 x i32> @_invec8xi32(<8 x i32>%a)  {
 ; GENERIC-LABEL: _invec8xi32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _invec8xi32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
@@ -8815,12 +8815,12 @@ define <16 x i32> @_invec8xi32(<8 x i32>
 
 define <8 x i64> @_invec4xi64(<4 x i64>%a)  {
 ; GENERIC-LABEL: _invec4xi64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: _invec4xi64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastsd %xmm0, %zmm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
@@ -8830,7 +8830,7 @@ define <8 x i64> @_invec4xi64(<4 x i64>%
 declare void @func_f32(float)
 define <16 x float> @broadcast_ss_spill(float %x) {
 ; GENERIC-LABEL: broadcast_ss_spill:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    subq $24, %rsp # sched: [1:0.33]
 ; GENERIC-NEXT:    .cfi_def_cfa_offset 32
 ; GENERIC-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
@@ -8842,7 +8842,7 @@ define <16 x float> @broadcast_ss_spill(
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: broadcast_ss_spill:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    subq $24, %rsp # sched: [1:0.25]
 ; SKX-NEXT:    .cfi_def_cfa_offset 32
 ; SKX-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
@@ -8863,7 +8863,7 @@ define <16 x float> @broadcast_ss_spill(
 declare void @func_f64(double)
 define <8 x double> @broadcast_sd_spill(double %x) {
 ; GENERIC-LABEL: broadcast_sd_spill:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    subq $24, %rsp # sched: [1:0.33]
 ; GENERIC-NEXT:    .cfi_def_cfa_offset 32
 ; GENERIC-NEXT:    vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
@@ -8875,7 +8875,7 @@ define <8 x double> @broadcast_sd_spill(
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SKX-LABEL: broadcast_sd_spill:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    subq $24, %rsp # sched: [1:0.25]
 ; SKX-NEXT:    .cfi_def_cfa_offset 32
 ; SKX-NEXT:    vaddsd %xmm0, %xmm0, %xmm0 # sched: [4:0.33]

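[Editorial note] Every hunk in these test updates is the same mechanical substitution: the basic-block comment in the emitted assembly changes from "# BB#N:" to "# %bb.N:", and the FileCheck lines are refreshed to match. As a rough illustration only — checked-in tests like these are normally regenerated with utils/update_llc_test_checks.py rather than edited by hand, so this sketch is not the tooling behind the commit — the textual rewrite amounts to a substitution like the following:

    # Hedged sketch (Python): rewrite old-style block comments such as
    # "# BB#0:" to the new "# %bb.0:" form in-place. This mirrors the
    # textual effect of the hunks above; it is NOT the script used to
    # produce this commit.
    import re
    import sys

    # One or more '#' comment characters, a space, then "BB#<number>".
    BLOCK_REF = re.compile(r'(#+ )BB#(\d+)')

    def rewrite(text: str) -> str:
        # "# BB#0:" -> "# %bb.0:"
        return BLOCK_REF.sub(r'\1%bb.\2', text)

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            with open(path) as f:
                src = f.read()
            with open(path, "w") as f:
                f.write(rewrite(src))
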
Modified: llvm/trunk/test/CodeGen/X86/avx512-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-select.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-select.ll Mon Dec  4 09:18:51 2017
@@ -4,22 +4,22 @@
 
 define <16 x i32> @select00(i32 %a, <16 x i32> %b) nounwind {
 ; X86-LABEL: select00:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    cmpl $255, {{[0-9]+}}(%esp)
 ; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    je .LBB0_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm1
 ; X86-NEXT:  .LBB0_2:
 ; X86-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select00:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    cmpl $255, %edi
 ; X64-NEXT:    je .LBB0_2
-; X64-NEXT:  # BB#1:
+; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm1
 ; X64-NEXT:  .LBB0_2:
 ; X64-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
@@ -32,22 +32,22 @@ define <16 x i32> @select00(i32 %a, <16
 
 define <8 x i64> @select01(i32 %a, <8 x i64> %b) nounwind {
 ; X86-LABEL: select01:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    cmpl $255, {{[0-9]+}}(%esp)
 ; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    je .LBB1_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm1
 ; X86-NEXT:  .LBB1_2:
 ; X86-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select01:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    cmpl $255, %edi
 ; X64-NEXT:    je .LBB1_2
-; X64-NEXT:  # BB#1:
+; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm1
 ; X64-NEXT:  .LBB1_2:
 ; X64-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
@@ -60,7 +60,7 @@ define <8 x i64> @select01(i32 %a, <8 x
 
 define float @select02(float %a, float %b, float %c, float %eps) {
 ; X86-LABEL: select02:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-NEXT:    vucomiss {{[0-9]+}}(%esp), %xmm0
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
@@ -70,7 +70,7 @@ define float @select02(float %a, float %
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select02:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpless %xmm0, %xmm3, %k1
 ; X64-NEXT:    vmovss %xmm2, %xmm0, %xmm1 {%k1}
 ; X64-NEXT:    vmovaps %xmm1, %xmm0
@@ -82,7 +82,7 @@ define float @select02(float %a, float %
 
 define double @select03(double %a, double %b, double %c, double %eps) {
 ; X86-LABEL: select03:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-NEXT:    vucomisd {{[0-9]+}}(%esp), %xmm0
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
@@ -92,7 +92,7 @@ define double @select03(double %a, doubl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select03:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmplesd %xmm0, %xmm3, %k1
 ; X64-NEXT:    vmovsd %xmm2, %xmm0, %xmm1 {%k1}
 ; X64-NEXT:    vmovapd %xmm1, %xmm0
@@ -104,7 +104,7 @@ define double @select03(double %a, doubl
 
 define <16 x double> @select04(<16 x double> %a, <16 x double> %b) {
 ; X86-LABEL: select04:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %ebp, -8
@@ -118,7 +118,7 @@ define <16 x double> @select04(<16 x dou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select04:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %zmm3, %zmm1
 ; X64-NEXT:    retq
   %sel = select <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x double> %a, <16 x double> %b
@@ -127,13 +127,13 @@ define <16 x double> @select04(<16 x dou
 
 define i8 @select05(i8 %a.0, i8 %m) {
 ; X86-LABEL: select05:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    orb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select05:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    orl %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -146,7 +146,7 @@ define i8 @select05(i8 %a.0, i8 %m) {
 
 define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ; X86-LABEL: select05_mem:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzbl (%ecx), %ecx
@@ -159,7 +159,7 @@ define i8 @select05_mem(<8 x i1>* %a.0,
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select05_mem:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl (%rsi), %eax
 ; X64-NEXT:    kmovw %eax, %k0
 ; X64-NEXT:    movzbl (%rdi), %eax
@@ -177,13 +177,13 @@ define i8 @select05_mem(<8 x i1>* %a.0,
 
 define i8 @select06(i8 %a.0, i8 %m) {
 ; X86-LABEL: select06:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select06:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andl %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -196,7 +196,7 @@ define i8 @select06(i8 %a.0, i8 %m) {
 
 define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
 ; X86-LABEL: select06_mem:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzbl (%ecx), %ecx
@@ -209,7 +209,7 @@ define i8 @select06_mem(<8 x i1>* %a.0,
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select06_mem:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl (%rsi), %eax
 ; X64-NEXT:    kmovw %eax, %k0
 ; X64-NEXT:    movzbl (%rdi), %eax
@@ -226,7 +226,7 @@ define i8 @select06_mem(<8 x i1>* %a.0,
 }
 define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
 ; X86-LABEL: select07:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k0
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
@@ -241,7 +241,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select07:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edx, %k0
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    kmovw %esi, %k2
@@ -261,13 +261,13 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8
 
 define i64 @pr30249() {
 ; X86-LABEL: pr30249:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl $2, %eax
 ; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr30249:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $2, %eax
 ; X64-NEXT:    retq
   %v = select i1 undef , i64 1, i64 2
@@ -276,7 +276,7 @@ define i64 @pr30249() {
 
 define double @pr30561_f64(double %b, double %a, i1 %c) {
 ; X86-LABEL: pr30561_f64:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
@@ -285,7 +285,7 @@ define double @pr30561_f64(double %b, do
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr30561_f64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1}
 ; X64-NEXT:    retq
@@ -295,7 +295,7 @@ define double @pr30561_f64(double %b, do
 
 define float @pr30561_f32(float %b, float %a, i1 %c) {
 ; X86-LABEL: pr30561_f32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
@@ -304,7 +304,7 @@ define float @pr30561_f32(float %b, floa
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr30561_f32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1}
 ; X64-NEXT:    retq
@@ -314,7 +314,7 @@ define float @pr30561_f32(float %b, floa
 
 define <16 x i16> @pr31515(<16 x i1> %a, <16 x i1> %b, <16 x i16> %c) nounwind {
 ; X86-LABEL: pr31515:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; X86-NEXT:    vpslld $31, %zmm1, %zmm1
 ; X86-NEXT:    vpmovsxbd %xmm0, %zmm0
@@ -327,7 +327,7 @@ define <16 x i16> @pr31515(<16 x i1> %a,
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr31515:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; X64-NEXT:    vpslld $31, %zmm1, %zmm1
 ; X64-NEXT:    vpmovsxbd %xmm0, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/avx512-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shift.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <16 x i32> @shift_16_i32(<16 x i32> %a) {
 ; CHECK-LABEL: shift_16_i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrld $1, %zmm0, %zmm0
 ; CHECK-NEXT:    vpslld $12, %zmm0, %zmm0
 ; CHECK-NEXT:    vpsrad $12, %zmm0, %zmm0
@@ -17,7 +17,7 @@ define <16 x i32> @shift_16_i32(<16 x i3
 
 define <8 x i64> @shift_8_i64(<8 x i64> %a) {
 ; CHECK-LABEL: shift_8_i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlq $1, %zmm0, %zmm0
 ; CHECK-NEXT:    vpsllq $12, %zmm0, %zmm0
 ; CHECK-NEXT:    vpsraq $12, %zmm0, %zmm0
@@ -30,7 +30,7 @@ define <8 x i64> @shift_8_i64(<8 x i64>
 
 define <4 x i64> @shift_4_i64(<4 x i64> %a) {
 ; KNL-LABEL: shift_4_i64:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpsrlq $1, %ymm0, %ymm0
 ; KNL-NEXT:    vpsllq $12, %ymm0, %ymm0
 ; KNL-NEXT:    vpsraq $12, %zmm0, %zmm0
@@ -38,7 +38,7 @@ define <4 x i64> @shift_4_i64(<4 x i64>
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: shift_4_i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsrlq $1, %ymm0, %ymm0
 ; SKX-NEXT:    vpsllq $12, %ymm0, %ymm0
 ; SKX-NEXT:    vpsraq $12, %ymm0, %ymm0
@@ -51,7 +51,7 @@ define <4 x i64> @shift_4_i64(<4 x i64>
 
 define <8 x i64> @variable_shl4(<8 x i64> %x, <8 x i64> %y) {
 ; CHECK-LABEL: variable_shl4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %k = shl <8 x i64> %x, %y
@@ -60,7 +60,7 @@ define <8 x i64> @variable_shl4(<8 x i64
 
 define <16 x i32> @variable_shl5(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: variable_shl5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %k = shl <16 x i32> %x, %y
@@ -69,7 +69,7 @@ define <16 x i32> @variable_shl5(<16 x i
 
 define <16 x i32> @variable_srl0(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: variable_srl0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %k = lshr <16 x i32> %x, %y
@@ -78,7 +78,7 @@ define <16 x i32> @variable_srl0(<16 x i
 
 define <8 x i64> @variable_srl2(<8 x i64> %x, <8 x i64> %y) {
 ; CHECK-LABEL: variable_srl2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %k = lshr <8 x i64> %x, %y
@@ -87,7 +87,7 @@ define <8 x i64> @variable_srl2(<8 x i64
 
 define <16 x i32> @variable_sra1(<16 x i32> %x, <16 x i32> %y) {
 ; CHECK-LABEL: variable_sra1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %k = ashr <16 x i32> %x, %y
@@ -96,7 +96,7 @@ define <16 x i32> @variable_sra1(<16 x i
 
 define <8 x i64> @variable_sra2(<8 x i64> %x, <8 x i64> %y) {
 ; CHECK-LABEL: variable_sra2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %k = ashr <8 x i64> %x, %y
@@ -105,7 +105,7 @@ define <8 x i64> @variable_sra2(<8 x i64
 
 define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
 ; KNL-LABEL: variable_sra3:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; KNL-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
@@ -113,7 +113,7 @@ define <4 x i64> @variable_sra3(<4 x i64
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: variable_sra3:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsravq %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %k = ashr <4 x i64> %x, %y
@@ -122,7 +122,7 @@ define <4 x i64> @variable_sra3(<4 x i64
 
 define <8 x i16> @variable_sra4(<8 x i16> %x, <8 x i16> %y) {
 ; KNL-LABEL: variable_sra4:
-; KNL:       # BB#0:
+; KNL:       # %bb.0:
 ; KNL-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; KNL-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; KNL-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
@@ -131,7 +131,7 @@ define <8 x i16> @variable_sra4(<8 x i16
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: variable_sra4:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsravw %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %k = ashr <8 x i16> %x, %y
@@ -140,7 +140,7 @@ define <8 x i16> @variable_sra4(<8 x i16
 
 define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
 ; CHECK-LABEL: variable_sra01_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsravd (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %y1 = load <16 x i32>, <16 x i32>* %y
@@ -150,7 +150,7 @@ define <16 x i32> @variable_sra01_load(<
 
 define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
 ; CHECK-LABEL: variable_shl1_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllvd (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %y1 = load <16 x i32>, <16 x i32>* %y
@@ -160,7 +160,7 @@ define <16 x i32> @variable_shl1_load(<1
 
 define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
 ; CHECK-LABEL: variable_srl0_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlvd (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %y1 = load <16 x i32>, <16 x i32>* %y
@@ -170,7 +170,7 @@ define <16 x i32> @variable_srl0_load(<1
 
 define <8 x i64> @variable_srl3_load(<8 x i64> %x, <8 x i64>* %y) {
 ; CHECK-LABEL: variable_srl3_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlvq (%rdi), %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %y1 = load <8 x i64>, <8 x i64>* %y

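[Editorial note] After a tree-wide update like this one, a quick scan for leftover old-style references is a cheap way to confirm no test was missed. The helper below is hypothetical and not part of the commit; the script name, directory argument, and file glob are illustrative assumptions:

    # Hedged sketch (Python): flag any test lines still using the old
    # "BB#N" block-reference form. Hypothetical helper, not from the
    # commit; adjust the root directory and glob to taste.
    import pathlib
    import re
    import sys

    STALE = re.compile(r'#+ BB#\d+')

    root = pathlib.Path(sys.argv[1])  # e.g. llvm/test/CodeGen/X86
    for path in root.rglob("*.ll"):
        for lineno, line in enumerate(path.read_text().splitlines(), 1):
            if STALE.search(line):
                print(f"{path}:{lineno}: stale block reference: {line.strip()}")

Usage would be something like: python find_stale_bb_refs.py llvm/test/CodeGen/X86 (both names are placeholders).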


