[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 4 09:18:56 PST 2017

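This commit unifies the textual format for machine basic block references: the old "BB#N" spelling becomes "%bb.N" in both MIR and the assembly comments printed in debug output. Every FileCheck line in the tests below is updated mechanically, e.g. for a function's entry block:

-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry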

Modified: llvm/trunk/test/CodeGen/X86/fold-rmw-ops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-rmw-ops.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-rmw-ops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-rmw-ops.ll Mon Dec  4 09:18:51 2017
@@ -13,13 +13,13 @@ declare void @b()
 
 define void @add64_imm32_br() nounwind {
 ; CHECK-LABEL: add64_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $16777214, {{.*}}(%rip) # encoding: [0x48,0x81,0x05,A,A,A,A,0xfe,0xff,0xff,0x00]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0xFFFFFE
 ; CHECK-NEXT:    js .LBB0_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB0_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -46,13 +46,13 @@ b:
 
 define void @add64_sext_imm32_br() nounwind {
 ; CHECK-LABEL: add64_sext_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x05,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    js .LBB1_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB1_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -79,13 +79,13 @@ b:
 
 define void @add64_imm32_via_sub_br() nounwind {
 ; CHECK-LABEL: add64_imm32_via_sub_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x2d,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    js .LBB2_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -113,14 +113,14 @@ b:
 
 define void @add64_no_imm32_via_sub_due_to_cf_br() nounwind {
 ; CHECK-LABEL: add64_no_imm32_via_sub_due_to_cf_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $2147483648, %eax # encoding: [0xb8,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    addq %rax, {{.*}}(%rip) # encoding: [0x48,0x01,0x05,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    jae .LBB3_2 # encoding: [0x73,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB3_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %a
+; CHECK-NEXT:  # %bb.1: # %a
 ; CHECK-NEXT:    jmp a # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: a-1, kind: FK_PCRel_1
@@ -149,14 +149,14 @@ b:
 
 define void @add64_too_large_imm32_br() nounwind {
 ; CHECK-LABEL: add64_too_large_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $2147483649, %eax # encoding: [0xb8,0x01,0x00,0x00,0x80]
 ; CHECK-NEXT:    # imm = 0x80000001
 ; CHECK-NEXT:    addq %rax, {{.*}}(%rip) # encoding: [0x48,0x01,0x05,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB4_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB4_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -184,12 +184,12 @@ b:
 
 define void @add64_imm8_via_sub_br() nounwind {
 ; CHECK-LABEL: add64_imm8_via_sub_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $-128, {{.*}}(%rip) # encoding: [0x48,0x83,0x2d,A,A,A,A,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB5_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB5_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -217,12 +217,12 @@ b:
 
 define void @add64_imm8_br() nounwind {
 ; CHECK-LABEL: add64_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $42, {{.*}}(%rip) # encoding: [0x48,0x83,0x05,A,A,A,A,0x2a]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB6_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB6_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -248,12 +248,12 @@ b:
 
 define void @add64_imm8_neg_br() nounwind {
 ; CHECK-LABEL: add64_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-42, {{.*}}(%rip) # encoding: [0x48,0x83,0x05,A,A,A,A,0xd6]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB7_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB7_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -279,13 +279,13 @@ b:
 
 define void @add32_imm_br() nounwind {
 ; CHECK-LABEL: add32_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x05,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    js .LBB8_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB8_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -312,12 +312,12 @@ b:
 
 define void @add32_imm8_br() nounwind {
 ; CHECK-LABEL: add32_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addl $42, {{.*}}(%rip) # encoding: [0x83,0x05,A,A,A,A,0x2a]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB9_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB9_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -343,12 +343,12 @@ b:
 
 define void @add32_imm8_neg_br() nounwind {
 ; CHECK-LABEL: add32_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addl $-42, {{.*}}(%rip) # encoding: [0x83,0x05,A,A,A,A,0xd6]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB10_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB10_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -374,13 +374,13 @@ b:
 
 define void @add16_imm_br() nounwind {
 ; CHECK-LABEL: add16_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x05,A,A,A,A,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x8000
 ; CHECK-NEXT:    js .LBB11_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB11_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -407,12 +407,12 @@ b:
 
 define void @add16_imm8_br() nounwind {
 ; CHECK-LABEL: add16_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addw $42, {{.*}}(%rip) # encoding: [0x66,0x83,0x05,A,A,A,A,0x2a]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB12_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB12_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -438,12 +438,12 @@ b:
 
 define void @add16_imm8_neg_br() nounwind {
 ; CHECK-LABEL: add16_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addw $-42, {{.*}}(%rip) # encoding: [0x66,0x83,0x05,A,A,A,A,0xd6]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB13_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB13_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -469,12 +469,12 @@ b:
 
 define void @add8_imm_br() nounwind {
 ; CHECK-LABEL: add8_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addb $-2, {{.*}}(%rip) # encoding: [0x80,0x05,A,A,A,A,0xfe]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB14_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB14_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -500,12 +500,12 @@ b:
 
 define void @add64_reg_br(i64 %arg) nounwind {
 ; CHECK-LABEL: add64_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq %rdi, {{.*}}(%rip) # encoding: [0x48,0x01,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB15_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB15_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -531,12 +531,12 @@ b:
 
 define void @add32_reg_br(i32 %arg) nounwind {
 ; CHECK-LABEL: add32_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addl %edi, {{.*}}(%rip) # encoding: [0x01,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB16_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB16_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -562,12 +562,12 @@ b:
 
 define void @add16_reg_br(i16 %arg) nounwind {
 ; CHECK-LABEL: add16_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addw %di, {{.*}}(%rip) # encoding: [0x66,0x01,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB17_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB17_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -593,12 +593,12 @@ b:
 
 define void @add8_reg_br(i8 %arg) nounwind {
 ; CHECK-LABEL: add8_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addb %dil, {{.*}}(%rip) # encoding: [0x40,0x00,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB18_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB18_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -624,13 +624,13 @@ b:
 
 define void @sub64_imm32_br() nounwind {
 ; CHECK-LABEL: sub64_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x2d,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    js .LBB19_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB19_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -658,14 +658,14 @@ b:
 
 define void @sub64_too_large_imm32_br() nounwind {
 ; CHECK-LABEL: sub64_too_large_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movabsq $-4294967295, %rax # encoding: [0x48,0xb8,0x01,0x00,0x00,0x00,0xff,0xff,0xff,0xff]
 ; CHECK-NEXT:    # imm = 0xFFFFFFFF00000001
 ; CHECK-NEXT:    addq %rax, {{.*}}(%rip) # encoding: [0x48,0x01,0x05,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB20_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB20_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -693,12 +693,12 @@ b:
 
 define void @sub64_imm8_br() nounwind {
 ; CHECK-LABEL: sub64_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $-128, {{.*}}(%rip) # encoding: [0x48,0x83,0x2d,A,A,A,A,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB21_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB21_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -726,13 +726,13 @@ b:
 
 define void @sub32_imm_br() nounwind {
 ; CHECK-LABEL: sub32_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x05,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    js .LBB22_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB22_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -760,12 +760,12 @@ b:
 
 define void @sub32_imm8_br() nounwind {
 ; CHECK-LABEL: sub32_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subl $-128, {{.*}}(%rip) # encoding: [0x83,0x2d,A,A,A,A,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB23_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB23_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -793,13 +793,13 @@ b:
 
 define void @sub16_imm_br() nounwind {
 ; CHECK-LABEL: sub16_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x05,A,A,A,A,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x8000
 ; CHECK-NEXT:    js .LBB24_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB24_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -827,12 +827,12 @@ b:
 
 define void @sub16_imm8_br() nounwind {
 ; CHECK-LABEL: sub16_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subw $-128, {{.*}}(%rip) # encoding: [0x66,0x83,0x2d,A,A,A,A,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB25_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB25_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -860,12 +860,12 @@ b:
 
 define void @sub8_imm_br() nounwind {
 ; CHECK-LABEL: sub8_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addb $-128, {{.*}}(%rip) # encoding: [0x80,0x05,A,A,A,A,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB26_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB26_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -893,12 +893,12 @@ b:
 
 define void @sub64_reg_br(i64 %arg) nounwind {
 ; CHECK-LABEL: sub64_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq %rdi, {{.*}}(%rip) # encoding: [0x48,0x29,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB27_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB27_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -924,12 +924,12 @@ b:
 
 define void @sub32_reg_br(i32 %arg) nounwind {
 ; CHECK-LABEL: sub32_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subl %edi, {{.*}}(%rip) # encoding: [0x29,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB28_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB28_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -955,12 +955,12 @@ b:
 
 define void @sub16_reg_br(i16 %arg) nounwind {
 ; CHECK-LABEL: sub16_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subw %di, {{.*}}(%rip) # encoding: [0x66,0x29,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB29_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB29_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -986,12 +986,12 @@ b:
 
 define void @sub8_reg_br(i8 %arg) nounwind {
 ; CHECK-LABEL: sub8_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subb %dil, {{.*}}(%rip) # encoding: [0x40,0x28,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    js .LBB30_1 # encoding: [0x78,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB30_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1017,13 +1017,13 @@ b:
 
 define void @and64_imm32_br() nounwind {
 ; CHECK-LABEL: and64_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andq $16777215, {{.*}}(%rip) # encoding: [0x48,0x81,0x25,A,A,A,A,0xff,0xff,0xff,0x00]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0xFFFFFF
 ; CHECK-NEXT:    je .LBB31_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB31_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1050,13 +1050,13 @@ b:
 
 define void @and64_sext_imm32_br() nounwind {
 ; CHECK-LABEL: and64_sext_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x25,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    je .LBB32_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB32_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1083,12 +1083,12 @@ b:
 
 define void @and64_imm8_br() nounwind {
 ; CHECK-LABEL: and64_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andq $15, {{.*}}(%rip) # encoding: [0x48,0x83,0x25,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB33_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB33_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1114,12 +1114,12 @@ b:
 
 define void @and64_imm8_neg_br() nounwind {
 ; CHECK-LABEL: and64_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andq $-4, {{.*}}(%rip) # encoding: [0x48,0x83,0x25,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB34_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB34_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1145,7 +1145,7 @@ b:
 
 define void @and32_imm_br() nounwind {
 ; CHECK-LABEL: and32_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $-2147483648, %eax # encoding: [0xb8,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    andl {{.*}}(%rip), %eax # encoding: [0x23,0x05,A,A,A,A]
@@ -1154,7 +1154,7 @@ define void @and32_imm_br() nounwind {
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    jne .LBB35_2 # encoding: [0x75,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB35_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %a
+; CHECK-NEXT:  # %bb.1: # %a
 ; CHECK-NEXT:    jmp a # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: a-1, kind: FK_PCRel_1
@@ -1181,12 +1181,12 @@ b:
 
 define void @and32_imm8_br() nounwind {
 ; CHECK-LABEL: and32_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andl $15, {{.*}}(%rip) # encoding: [0x83,0x25,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB36_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB36_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1212,12 +1212,12 @@ b:
 
 define void @and32_imm8_neg_br() nounwind {
 ; CHECK-LABEL: and32_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andl $-4, {{.*}}(%rip) # encoding: [0x83,0x25,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB37_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB37_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1243,7 +1243,7 @@ b:
 
 define void @and16_imm_br() nounwind {
 ; CHECK-LABEL: and16_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movzwl {{.*}}(%rip), %eax # encoding: [0x0f,0xb7,0x05,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    andl $32768, %eax # encoding: [0x25,0x00,0x80,0x00,0x00]
@@ -1253,7 +1253,7 @@ define void @and16_imm_br() nounwind {
 ; CHECK-NEXT:    testw %ax, %ax # encoding: [0x66,0x85,0xc0]
 ; CHECK-NEXT:    jne .LBB38_2 # encoding: [0x75,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB38_2-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#1: # %a
+; CHECK-NEXT:  # %bb.1: # %a
 ; CHECK-NEXT:    jmp a # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: a-1, kind: FK_PCRel_1
@@ -1279,12 +1279,12 @@ b:
 
 define void @and16_imm8_br() nounwind {
 ; CHECK-LABEL: and16_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andw $15, {{.*}}(%rip) # encoding: [0x66,0x83,0x25,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB39_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB39_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1310,12 +1310,12 @@ b:
 
 define void @and16_imm8_neg_br() nounwind {
 ; CHECK-LABEL: and16_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andw $-4, {{.*}}(%rip) # encoding: [0x66,0x83,0x25,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB40_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB40_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1341,12 +1341,12 @@ b:
 
 define void @and8_imm_br() nounwind {
 ; CHECK-LABEL: and8_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andb $-4, {{.*}}(%rip) # encoding: [0x80,0x25,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB41_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB41_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1372,12 +1372,12 @@ b:
 
 define void @and64_reg_br(i64 %arg) nounwind {
 ; CHECK-LABEL: and64_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andq %rdi, {{.*}}(%rip) # encoding: [0x48,0x21,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB42_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB42_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1403,12 +1403,12 @@ b:
 
 define void @and32_reg_br(i32 %arg) nounwind {
 ; CHECK-LABEL: and32_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andl %edi, {{.*}}(%rip) # encoding: [0x21,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB43_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB43_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1434,12 +1434,12 @@ b:
 
 define void @and16_reg_br(i16 %arg) nounwind {
 ; CHECK-LABEL: and16_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andw %di, {{.*}}(%rip) # encoding: [0x66,0x21,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB44_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB44_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1465,12 +1465,12 @@ b:
 
 define void @and8_reg_br(i8 %arg) nounwind {
 ; CHECK-LABEL: and8_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andb %dil, {{.*}}(%rip) # encoding: [0x40,0x20,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB45_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB45_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1496,13 +1496,13 @@ b:
 
 define void @or64_imm32_br() nounwind {
 ; CHECK-LABEL: or64_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq $16777215, {{.*}}(%rip) # encoding: [0x48,0x81,0x0d,A,A,A,A,0xff,0xff,0xff,0x00]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0xFFFFFF
 ; CHECK-NEXT:    je .LBB46_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB46_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1529,13 +1529,13 @@ b:
 
 define void @or64_sext_imm32_br() nounwind {
 ; CHECK-LABEL: or64_sext_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x0d,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    je .LBB47_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB47_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1562,12 +1562,12 @@ b:
 
 define void @or64_imm8_br() nounwind {
 ; CHECK-LABEL: or64_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq $15, {{.*}}(%rip) # encoding: [0x48,0x83,0x0d,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB48_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB48_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1593,12 +1593,12 @@ b:
 
 define void @or64_imm8_neg_br() nounwind {
 ; CHECK-LABEL: or64_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq $-4, {{.*}}(%rip) # encoding: [0x48,0x83,0x0d,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB49_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB49_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1624,13 +1624,13 @@ b:
 
 define void @or32_imm_br() nounwind {
 ; CHECK-LABEL: or32_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x0d,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    je .LBB50_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB50_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1657,12 +1657,12 @@ b:
 
 define void @or32_imm8_br() nounwind {
 ; CHECK-LABEL: or32_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orl $15, {{.*}}(%rip) # encoding: [0x83,0x0d,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB51_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB51_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1688,12 +1688,12 @@ b:
 
 define void @or32_imm8_neg_br() nounwind {
 ; CHECK-LABEL: or32_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orl $-4, {{.*}}(%rip) # encoding: [0x83,0x0d,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB52_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB52_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1719,13 +1719,13 @@ b:
 
 define void @or16_imm_br() nounwind {
 ; CHECK-LABEL: or16_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x0d,A,A,A,A,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x8000
 ; CHECK-NEXT:    je .LBB53_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB53_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1751,12 +1751,12 @@ b:
 
 define void @or16_imm8_br() nounwind {
 ; CHECK-LABEL: or16_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orw $15, {{.*}}(%rip) # encoding: [0x66,0x83,0x0d,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB54_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB54_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1782,12 +1782,12 @@ b:
 
 define void @or16_imm8_neg_br() nounwind {
 ; CHECK-LABEL: or16_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orw $-4, {{.*}}(%rip) # encoding: [0x66,0x83,0x0d,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB55_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB55_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1813,12 +1813,12 @@ b:
 
 define void @or8_imm_br() nounwind {
 ; CHECK-LABEL: or8_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orb $-4, {{.*}}(%rip) # encoding: [0x80,0x0d,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB56_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB56_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1844,12 +1844,12 @@ b:
 
 define void @or64_reg_br(i64 %arg) nounwind {
 ; CHECK-LABEL: or64_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orq %rdi, {{.*}}(%rip) # encoding: [0x48,0x09,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB57_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB57_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1875,12 +1875,12 @@ b:
 
 define void @or32_reg_br(i32 %arg) nounwind {
 ; CHECK-LABEL: or32_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orl %edi, {{.*}}(%rip) # encoding: [0x09,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB58_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB58_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1906,12 +1906,12 @@ b:
 
 define void @or16_reg_br(i16 %arg) nounwind {
 ; CHECK-LABEL: or16_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orw %di, {{.*}}(%rip) # encoding: [0x66,0x09,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB59_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB59_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1937,12 +1937,12 @@ b:
 
 define void @or8_reg_br(i8 %arg) nounwind {
 ; CHECK-LABEL: or8_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    orb %dil, {{.*}}(%rip) # encoding: [0x40,0x08,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB60_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB60_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -1968,13 +1968,13 @@ b:
 
 define void @xor64_imm32_br() nounwind {
 ; CHECK-LABEL: xor64_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorq $16777215, {{.*}}(%rip) # encoding: [0x48,0x81,0x35,A,A,A,A,0xff,0xff,0xff,0x00]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0xFFFFFF
 ; CHECK-NEXT:    je .LBB61_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB61_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2001,13 +2001,13 @@ b:
 
 define void @xor64_sext_imm32_br() nounwind {
 ; CHECK-LABEL: xor64_sext_imm32_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorq $-2147483648, {{.*}}(%rip) # encoding: [0x48,0x81,0x35,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    je .LBB62_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB62_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2034,12 +2034,12 @@ b:
 
 define void @xor64_imm8_br() nounwind {
 ; CHECK-LABEL: xor64_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorq $15, {{.*}}(%rip) # encoding: [0x48,0x83,0x35,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB63_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB63_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2065,12 +2065,12 @@ b:
 
 define void @xor64_imm8_neg_br() nounwind {
 ; CHECK-LABEL: xor64_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorq $-4, {{.*}}(%rip) # encoding: [0x48,0x83,0x35,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB64_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB64_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2096,13 +2096,13 @@ b:
 
 define void @xor32_imm_br() nounwind {
 ; CHECK-LABEL: xor32_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl $-2147483648, {{.*}}(%rip) # encoding: [0x81,0x35,A,A,A,A,0x00,0x00,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-8, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x80000000
 ; CHECK-NEXT:    je .LBB65_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB65_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2129,12 +2129,12 @@ b:
 
 define void @xor32_imm8_br() nounwind {
 ; CHECK-LABEL: xor32_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl $15, {{.*}}(%rip) # encoding: [0x83,0x35,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB66_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB66_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2160,12 +2160,12 @@ b:
 
 define void @xor32_imm8_neg_br() nounwind {
 ; CHECK-LABEL: xor32_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl $-4, {{.*}}(%rip) # encoding: [0x83,0x35,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB67_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB67_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2191,13 +2191,13 @@ b:
 
 define void @xor16_imm_br() nounwind {
 ; CHECK-LABEL: xor16_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorw $-32768, {{.*}}(%rip) # encoding: [0x66,0x81,0x35,A,A,A,A,0x00,0x80]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-6, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    # imm = 0x8000
 ; CHECK-NEXT:    je .LBB68_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB68_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2223,12 +2223,12 @@ b:
 
 define void @xor16_imm8_br() nounwind {
 ; CHECK-LABEL: xor16_imm8_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorw $15, {{.*}}(%rip) # encoding: [0x66,0x83,0x35,A,A,A,A,0x0f]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB69_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB69_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2254,12 +2254,12 @@ b:
 
 define void @xor16_imm8_neg_br() nounwind {
 ; CHECK-LABEL: xor16_imm8_neg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorw $-4, {{.*}}(%rip) # encoding: [0x66,0x83,0x35,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB70_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB70_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2285,12 +2285,12 @@ b:
 
 define void @xor8_imm_br() nounwind {
 ; CHECK-LABEL: xor8_imm_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorb $-4, {{.*}}(%rip) # encoding: [0x80,0x35,A,A,A,A,0xfc]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g8-5, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB71_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB71_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2316,12 +2316,12 @@ b:
 
 define void @xor64_reg_br(i64 %arg) nounwind {
 ; CHECK-LABEL: xor64_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorq %rdi, {{.*}}(%rip) # encoding: [0x48,0x31,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g64-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB72_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB72_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2347,12 +2347,12 @@ b:
 
 define void @xor32_reg_br(i32 %arg) nounwind {
 ; CHECK-LABEL: xor32_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %edi, {{.*}}(%rip) # encoding: [0x31,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 2, value: g32-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB73_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB73_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2378,12 +2378,12 @@ b:
 
 define void @xor16_reg_br(i16 %arg) nounwind {
 ; CHECK-LABEL: xor16_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorw %di, {{.*}}(%rip) # encoding: [0x66,0x31,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g16-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB74_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB74_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1
@@ -2409,12 +2409,12 @@ b:
 
 define void @xor8_reg_br(i8 %arg) nounwind {
 ; CHECK-LABEL: xor8_reg_br:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorb %dil, {{.*}}(%rip) # encoding: [0x40,0x30,0x3d,A,A,A,A]
 ; CHECK-NEXT:    # fixup A - offset: 3, value: g8-4, kind: reloc_riprel_4byte
 ; CHECK-NEXT:    je .LBB75_1 # encoding: [0x74,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: .LBB75_1-1, kind: FK_PCRel_1
-; CHECK-NEXT:  # BB#2: # %b
+; CHECK-NEXT:  # %bb.2: # %b
 ; CHECK-NEXT:    jmp b # TAILCALL
 ; CHECK-NEXT:    # encoding: [0xeb,A]
 ; CHECK-NEXT:    # fixup A - offset: 1, value: b-1, kind: FK_PCRel_1

Modified: llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define <4 x i64> @foo(<4 x i64> %A) {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vmovdqa %xmm1, %xmm1
 ; CHECK-NEXT:    vandps %ymm0, %ymm1, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-vector-sext-crash2.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <2 x i256> @test_sext1() {
 ; X32-LABEL: test_sext1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $-1, 60(%eax)
 ; X32-NEXT:    movl $-1, 56(%eax)
@@ -27,7 +27,7 @@ define <2 x i256> @test_sext1() {
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_sext1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movaps %xmm0, 16(%rdi)
 ; X64-NEXT:    movaps %xmm0, (%rdi)
@@ -44,7 +44,7 @@ define <2 x i256> @test_sext1() {
 
 define <2 x i256> @test_sext2() {
 ; X32-LABEL: test_sext2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $-1, 60(%eax)
 ; X32-NEXT:    movl $-1, 56(%eax)
@@ -65,7 +65,7 @@ define <2 x i256> @test_sext2() {
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_sext2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movaps %xmm0, 16(%rdi)
 ; X64-NEXT:    movaps %xmm0, (%rdi)
@@ -82,7 +82,7 @@ define <2 x i256> @test_sext2() {
 
 define <2 x i256> @test_zext1() {
 ; X32-LABEL: test_zext1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $0, 60(%eax)
 ; X32-NEXT:    movl $0, 56(%eax)
@@ -103,7 +103,7 @@ define <2 x i256> @test_zext1() {
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_zext1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movaps %xmm0, 48(%rdi)
 ; X64-NEXT:    movaps %xmm0, 16(%rdi)
@@ -119,7 +119,7 @@ define <2 x i256> @test_zext1() {
 
 define <2 x i256> @test_zext2() {
 ; X32-LABEL: test_zext2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $0, 60(%eax)
 ; X32-NEXT:    movl $0, 56(%eax)
@@ -140,7 +140,7 @@ define <2 x i256> @test_zext2() {
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_zext2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movaps %xmm0, 48(%rdi)
 ; X64-NEXT:    movaps %xmm0, 16(%rdi)

Modified: llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-vector-sext-zext.ll Mon Dec  4 09:18:51 2017
@@ -10,12 +10,12 @@
 
 define <4 x i16> @test_sext_4i8_4i16() {
 ; X32-LABEL: test_sext_4i8_4i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -28,12 +28,12 @@ define <4 x i16> @test_sext_4i8_4i16() {
 
 define <4 x i16> @test_sext_4i8_4i16_undef() {
 ; X32-LABEL: test_sext_4i8_4i16_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i16_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -46,12 +46,12 @@ define <4 x i16> @test_sext_4i8_4i16_und
 
 define <4 x i32> @test_sext_4i8_4i32() {
 ; X32-LABEL: test_sext_4i8_4i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,4294967295,2,4294967293]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -64,12 +64,12 @@ define <4 x i32> @test_sext_4i8_4i32() {
 
 define <4 x i32> @test_sext_4i8_4i32_undef() {
 ; X32-LABEL: test_sext_4i8_4i32_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i32_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,4294967295,u,4294967293>
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -82,12 +82,12 @@ define <4 x i32> @test_sext_4i8_4i32_und
 
 define <4 x i64> @test_sext_4i8_4i64() {
 ; X32-LABEL: test_sext_4i8_4i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,4294967295,4294967295,2,0,4294967293,4294967295]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [0,18446744073709551615,2,18446744073709551613]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -100,12 +100,12 @@ define <4 x i64> @test_sext_4i8_4i64() {
 
 define <4 x i64> @test_sext_4i8_4i64_undef() {
 ; X32-LABEL: test_sext_4i8_4i64_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <u,u,4294967295,4294967295,u,u,4294967293,4294967295>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_4i8_4i64_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <u,18446744073709551615,u,18446744073709551613>
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -118,12 +118,12 @@ define <4 x i64> @test_sext_4i8_4i64_und
 
 define <8 x i16> @test_sext_8i8_8i16() {
 ; X32-LABEL: test_sext_8i8_8i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <0,65535,2,65533,u,u,u,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <0,65535,2,65533,u,u,u,u>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -140,12 +140,12 @@ define <8 x i16> @test_sext_8i8_8i16() {
 
 define <8 x i32> @test_sext_8i8_8i32() {
 ; X32-LABEL: test_sext_8i8_8i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,4294967295,2,4294967293,u,u,u,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,4294967295,2,4294967293,u,u,u,u>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -162,12 +162,12 @@ define <8 x i32> @test_sext_8i8_8i32() {
 
 define <8 x i16> @test_sext_8i8_8i16_undef() {
 ; X32-LABEL: test_sext_8i8_8i16_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,u,u,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i16_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,65535,u,65533,u,u,u,u>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -184,12 +184,12 @@ define <8 x i16> @test_sext_8i8_8i16_und
 
 define <8 x i32> @test_sext_8i8_8i32_undef() {
 ; X32-LABEL: test_sext_8i8_8i32_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,u,u,u,u,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_sext_8i8_8i32_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,u,u,u,u,u>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -206,12 +206,12 @@ define <8 x i32> @test_sext_8i8_8i32_und
 
 define <4 x i16> @test_zext_4i8_4i16() {
 ; X32-LABEL: test_zext_4i8_4i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -224,12 +224,12 @@ define <4 x i16> @test_zext_4i8_4i16() {
 
 define <4 x i32> @test_zext_4i8_4i32() {
 ; X32-LABEL: test_zext_4i8_4i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -242,12 +242,12 @@ define <4 x i32> @test_zext_4i8_4i32() {
 
 define <4 x i64> @test_zext_4i8_4i64() {
 ; X32-LABEL: test_zext_4i8_4i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,255,0,2,0,253,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,2,253]
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -260,12 +260,12 @@ define <4 x i64> @test_zext_4i8_4i64() {
 
 define <4 x i16> @test_zext_4i8_4i16_undef() {
 ; X32-LABEL: test_zext_4i8_4i16_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i16_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253>
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -278,12 +278,12 @@ define <4 x i16> @test_zext_4i8_4i16_und
 
 define <4 x i32> @test_zext_4i8_4i32_undef() {
 ; X32-LABEL: test_zext_4i8_4i32_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <0,u,2,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i32_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <0,u,2,u>
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 0, i32 0
@@ -296,12 +296,12 @@ define <4 x i32> @test_zext_4i8_4i32_und
 
 define <4 x i64> @test_zext_4i8_4i64_undef() {
 ; X32-LABEL: test_zext_4i8_4i64_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <u,u,255,0,2,0,u,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_4i8_4i64_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <u,255,2,u>
 ; X64-NEXT:    retq
   %1 = insertelement <4 x i8> undef, i8 undef, i32 0
@@ -314,12 +314,12 @@ define <4 x i64> @test_zext_4i8_4i64_und
 
 define <8 x i16> @test_zext_8i8_8i16() {
 ; X32-LABEL: test_zext_8i8_8i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253,4,251,6,249]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,2,253,4,251,6,249]
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -336,12 +336,12 @@ define <8 x i16> @test_zext_8i8_8i16() {
 
 define <8 x i32> @test_zext_8i8_8i32() {
 ; X32-LABEL: test_zext_8i8_8i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,2,253,4,251,6,249]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [0,255,2,253,4,251,6,249]
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0
@@ -358,12 +358,12 @@ define <8 x i32> @test_zext_8i8_8i32() {
 
 define <8 x i16> @test_zext_8i8_8i16_undef() {
 ; X32-LABEL: test_zext_8i8_8i16_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253,u,251,u,249>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i16_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = <u,255,u,253,u,251,u,249>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -380,12 +380,12 @@ define <8 x i16> @test_zext_8i8_8i16_und
 
 define <8 x i32> @test_zext_8i8_8i32_undef() {
 ; X32-LABEL: test_zext_8i8_8i32_undef:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,253,4,u,6,u>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_zext_8i8_8i32_undef:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = <0,u,2,253,4,u,6,u>
 ; X64-NEXT:    retq
   %1 = insertelement <8 x i8> undef, i8 0, i32 0

Modified: llvm/trunk/test/CodeGen/X86/fp-fast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-fast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-fast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-fast.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define float @test1(float %a) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fadd float %a, %a
@@ -13,7 +13,7 @@ define float @test1(float %a) {
 
 define float @test2(float %a) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fmul float 4.0, %a
@@ -24,7 +24,7 @@ define float @test2(float %a) {
 
 define float @test3(float %a) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fmul float %a, 4.0
@@ -35,7 +35,7 @@ define float @test3(float %a) {
 
 define float @test4(float %a) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fadd float %a, %a
@@ -46,7 +46,7 @@ define float @test4(float %a) {
 
 define float @test5(float %a) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fadd float %a, %a
@@ -57,7 +57,7 @@ define float @test5(float %a) {
 
 define float @test6(float %a) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fmul float 2.0, %a
@@ -68,7 +68,7 @@ define float @test6(float %a) {
 
 define float @test7(float %a) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fmul float %a, 2.0
@@ -79,7 +79,7 @@ define float @test7(float %a) {
 
 define float @test8(float %a) {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %t1 = fmul float %a, 0.0
   %t2 = fadd float %a, %t1
@@ -88,7 +88,7 @@ define float @test8(float %a) {
 
 define float @test9(float %a) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %t1 = fmul float 0.0, %a
   %t2 = fadd float %t1, %a
@@ -97,7 +97,7 @@ define float @test9(float %a) {
 
 define float @test10(float %a) {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fsub float -0.0, %a
@@ -107,7 +107,7 @@ define float @test10(float %a) {
 
 define float @test11(float %a) {
 ; CHECK-LABEL: test11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %t1 = fsub float -0.0, %a

Modified: llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <1 x float> @test1(<1 x double>* %p) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -15,7 +15,7 @@ define <1 x float> @test1(<1 x double>*
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushl %eax
 ; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -31,13 +31,13 @@ define <1 x float> @test1(<1 x double>*
 
 define <2 x float> @test2(<2 x double>* %p) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cvtpd2ps (%eax), %xmm0
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-NEXT:    vcvtpd2psx (%eax), %xmm0
 ; AVX-NEXT:    retl
@@ -48,7 +48,7 @@ define <2 x float> @test2(<2 x double>*
 
 define <4 x float> @test3(<4 x double>* %p) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cvtpd2ps 16(%eax), %xmm1
 ; CHECK-NEXT:    cvtpd2ps (%eax), %xmm0
@@ -56,7 +56,7 @@ define <4 x float> @test3(<4 x double>*
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-NEXT:    vcvtpd2psy (%eax), %xmm0
 ; AVX-NEXT:    retl
@@ -67,7 +67,7 @@ define <4 x float> @test3(<4 x double>*
 
 define <8 x float> @test4(<8 x double>* %p) nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cvtpd2ps 16(%eax), %xmm1
 ; CHECK-NEXT:    cvtpd2ps (%eax), %xmm0
@@ -78,7 +78,7 @@ define <8 x float> @test4(<8 x double>*
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-NEXT:    vcvtpd2psy (%eax), %xmm0
 ; AVX-NEXT:    vcvtpd2psy 32(%eax), %xmm1

Modified: llvm/trunk/test/CodeGen/X86/fp-logic-replace.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-logic-replace.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-logic-replace.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-logic-replace.ll Mon Dec  4 09:18:51 2017
@@ -11,17 +11,17 @@
 
 define double @FsANDPSrr(double %x, double %y) {
 ; SSE-LABEL: FsANDPSrr:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
 ; SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX-LABEL: FsANDPSrr:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
 ; AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512DQ-LABEL: FsANDPSrr:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
 ; AVX512DQ-NEXT:    retq # encoding: [0xc3]
   %bc1 = bitcast double %x to i64
@@ -33,18 +33,18 @@ define double @FsANDPSrr(double %x, doub
 
 define double @FsANDNPSrr(double %x, double %y) {
 ; SSE-LABEL: FsANDNPSrr:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    andnps %xmm0, %xmm1 # encoding: [0x0f,0x55,0xc8]
 ; SSE-NEXT:    movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
 ; SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX-LABEL: FsANDNPSrr:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandnps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x55,0xc0]
 ; AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512DQ-LABEL: FsANDNPSrr:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vandnps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x55,0xc0]
 ; AVX512DQ-NEXT:    retq # encoding: [0xc3]
   %bc1 = bitcast double %x to i64
@@ -57,17 +57,17 @@ define double @FsANDNPSrr(double %x, dou
 
 define double @FsORPSrr(double %x, double %y) {
 ; SSE-LABEL: FsORPSrr:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
 ; SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX-LABEL: FsORPSrr:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
 ; AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512DQ-LABEL: FsORPSrr:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
 ; AVX512DQ-NEXT:    retq # encoding: [0xc3]
   %bc1 = bitcast double %x to i64
@@ -79,17 +79,17 @@ define double @FsORPSrr(double %x, doubl
 
 define double @FsXORPSrr(double %x, double %y) {
 ; SSE-LABEL: FsXORPSrr:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
 ; SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX-LABEL: FsXORPSrr:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
 ; AVX-NEXT:    retq # encoding: [0xc3]
 ;
 ; AVX512DQ-LABEL: FsXORPSrr:
-; AVX512DQ:       # BB#0:
+; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
 ; AVX512DQ-NEXT:    retq # encoding: [0xc3]
   %bc1 = bitcast double %x to i64

Modified: llvm/trunk/test/CodeGen/X86/fp-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-logic.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-logic.ll Mon Dec  4 09:18:51 2017
@@ -18,7 +18,7 @@
 
 define i32 @f1(float %x, i32 %y) {
 ; CHECK-LABEL: f1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd %xmm0, %eax
 ; CHECK-NEXT:    andl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -31,7 +31,7 @@ define i32 @f1(float %x, i32 %y) {
 
 define i32 @f2(float %x, i32 %y) {
 ; CHECK-LABEL: f2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd %xmm0, %eax
 ; CHECK-NEXT:    andl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -44,7 +44,7 @@ define i32 @f2(float %x, i32 %y) {
 
 define i32 @f3(float %x) {
 ; CHECK-LABEL: f3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd %xmm0, %eax
 ; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    retq
@@ -57,7 +57,7 @@ define i32 @f3(float %x) {
 
 define i32 @f4(float %x) {
 ; CHECK-LABEL: f4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd %xmm0, %eax
 ; CHECK-NEXT:    andl $2, %eax
 ; CHECK-NEXT:    retq
@@ -70,7 +70,7 @@ define i32 @f4(float %x) {
 
 define float @f5(float %x, i32 %y) {
 ; CHECK-LABEL: f5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd %edi, %xmm1
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -84,7 +84,7 @@ define float @f5(float %x, i32 %y) {
 
 define float @f6(float %x, i32 %y) {
 ; CHECK-LABEL: f6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movd %edi, %xmm1
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -98,7 +98,7 @@ define float @f6(float %x, i32 %y) {
 
 define float @f7(float %x) {
 ; CHECK-LABEL: f7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -112,7 +112,7 @@ define float @f7(float %x) {
 
 define float @f8(float %x) {
 ; CHECK-LABEL: f8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -126,7 +126,7 @@ define float @f8(float %x) {
 
 define i32 @f9(float %x, float %y) {
 ; CHECK-LABEL: f9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pand %xmm1, %xmm0
 ; CHECK-NEXT:    movd %xmm0, %eax
 ; CHECK-NEXT:    retq
@@ -140,7 +140,7 @@ define i32 @f9(float %x, float %y) {
 
 define float @f10(float %x, float %y) {
 ; CHECK-LABEL: f10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast float %x to i32
@@ -152,7 +152,7 @@ define float @f10(float %x, float %y) {
 
 define float @or(float %x, float %y) {
 ; CHECK-LABEL: or:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    orps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast float %x to i32
@@ -164,7 +164,7 @@ define float @or(float %x, float %y) {
 
 define float @xor(float %x, float %y) {
 ; CHECK-LABEL: xor:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast float %x to i32
@@ -176,7 +176,7 @@ define float @xor(float %x, float %y) {
 
 define float @f7_or(float %x) {
 ; CHECK-LABEL: f7_or:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    orps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -188,7 +188,7 @@ define float @f7_or(float %x) {
 
 define float @f7_xor(float %x) {
 ; CHECK-LABEL: f7_xor:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    xorps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -202,7 +202,7 @@ define float @f7_xor(float %x) {
 
 define double @doubles(double %x, double %y) {
 ; CHECK-LABEL: doubles:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast double %x to i64
@@ -214,7 +214,7 @@ define double @doubles(double %x, double
 
 define double @f7_double(double %x) {
 ; CHECK-LABEL: f7_double:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -230,7 +230,7 @@ define double @f7_double(double %x) {
 
 define float @movmsk(float %x) {
 ; CHECK-LABEL: movmsk:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -242,7 +242,7 @@ define float @movmsk(float %x) {
 
 define double @bitcast_fabs(double %x) {
 ; CHECK-LABEL: bitcast_fabs:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast double %x to i64
@@ -253,7 +253,7 @@ define double @bitcast_fabs(double %x) {
 
 define float @bitcast_fneg(float %x) {
 ; CHECK-LABEL: bitcast_fneg:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast float %x to i32
@@ -264,7 +264,7 @@ define float @bitcast_fneg(float %x) {
 
 define <2 x double> @bitcast_fabs_vec(<2 x double> %x) {
 ; CHECK-LABEL: bitcast_fabs_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <2 x double> %x to <2 x i64>
@@ -275,7 +275,7 @@ define <2 x double> @bitcast_fabs_vec(<2
 
 define <4 x float> @bitcast_fneg_vec(<4 x float> %x) {
 ; CHECK-LABEL: bitcast_fneg_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <4 x float> %x to <4 x i32>

Modified: llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define double @test1(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltsd %xmm2, %xmm0
 ; CHECK-NEXT:    andpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -14,7 +14,7 @@ define double @test1(double %a, double %
 
 define double @test2(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmplesd %xmm2, %xmm0
 ; CHECK-NEXT:    andpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -25,7 +25,7 @@ define double @test2(double %a, double %
 
 define double @test3(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltsd %xmm0, %xmm2
 ; CHECK-NEXT:    andpd %xmm1, %xmm2
 ; CHECK-NEXT:    movapd %xmm2, %xmm0
@@ -37,7 +37,7 @@ define double @test3(double %a, double %
 
 define double @test4(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmplesd %xmm0, %xmm2
 ; CHECK-NEXT:    andpd %xmm1, %xmm2
 ; CHECK-NEXT:    movapd %xmm2, %xmm0
@@ -49,7 +49,7 @@ define double @test4(double %a, double %
 
 define double @test5(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltsd %xmm2, %xmm0
 ; CHECK-NEXT:    andnpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -60,7 +60,7 @@ define double @test5(double %a, double %
 
 define double @test6(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmplesd %xmm2, %xmm0
 ; CHECK-NEXT:    andnpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -71,7 +71,7 @@ define double @test6(double %a, double %
 
 define double @test7(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltsd %xmm0, %xmm2
 ; CHECK-NEXT:    andnpd %xmm1, %xmm2
 ; CHECK-NEXT:    movapd %xmm2, %xmm0
@@ -83,7 +83,7 @@ define double @test7(double %a, double %
 
 define double @test8(double %a, double %b, double %eps) {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmplesd %xmm0, %xmm2
 ; CHECK-NEXT:    andnpd %xmm1, %xmm2
 ; CHECK-NEXT:    movapd %xmm2, %xmm0
@@ -95,7 +95,7 @@ define double @test8(double %a, double %
 
 define float @test9(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltss %xmm2, %xmm0
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -106,7 +106,7 @@ define float @test9(float %a, float %b,
 
 define float @test10(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpless %xmm2, %xmm0
 ; CHECK-NEXT:    andps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -117,7 +117,7 @@ define float @test10(float %a, float %b,
 
 define float @test11(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltss %xmm0, %xmm2
 ; CHECK-NEXT:    andps %xmm1, %xmm2
 ; CHECK-NEXT:    movaps %xmm2, %xmm0
@@ -129,7 +129,7 @@ define float @test11(float %a, float %b,
 
 define float @test12(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpless %xmm0, %xmm2
 ; CHECK-NEXT:    andps %xmm1, %xmm2
 ; CHECK-NEXT:    movaps %xmm2, %xmm0
@@ -141,7 +141,7 @@ define float @test12(float %a, float %b,
 
 define float @test13(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test13:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltss %xmm2, %xmm0
 ; CHECK-NEXT:    andnps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -152,7 +152,7 @@ define float @test13(float %a, float %b,
 
 define float @test14(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test14:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpless %xmm2, %xmm0
 ; CHECK-NEXT:    andnps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
@@ -163,7 +163,7 @@ define float @test14(float %a, float %b,
 
 define float @test15(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test15:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpltss %xmm0, %xmm2
 ; CHECK-NEXT:    andnps %xmm1, %xmm2
 ; CHECK-NEXT:    movaps %xmm2, %xmm0
@@ -175,7 +175,7 @@ define float @test15(float %a, float %b,
 
 define float @test16(float %a, float %b, float %eps) {
 ; CHECK-LABEL: test16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpless %xmm0, %xmm2
 ; CHECK-NEXT:    andnps %xmm1, %xmm2
 ; CHECK-NEXT:    movaps %xmm2, %xmm0
@@ -187,7 +187,7 @@ define float @test16(float %a, float %b,
 
 define float @test17(float %a, float %b, float %c, float %eps) {
 ; CHECK-LABEL: test17:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpless %xmm0, %xmm3
 ; CHECK-NEXT:    andps %xmm3, %xmm2
 ; CHECK-NEXT:    andnps %xmm1, %xmm3
@@ -201,7 +201,7 @@ define float @test17(float %a, float %b,
 
 define double @test18(double %a, double %b, double %c, double %eps) {
 ; CHECK-LABEL: test18:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmplesd %xmm0, %xmm3
 ; CHECK-NEXT:    andpd %xmm3, %xmm2
 ; CHECK-NEXT:    andnpd %xmm1, %xmm3

Modified: llvm/trunk/test/CodeGen/X86/fp-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-trunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-trunc.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <1 x float> @test1(<1 x double> %x) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %eax
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    cvtsd2ss %xmm0, %xmm0
@@ -14,7 +14,7 @@ define <1 x float> @test1(<1 x double> %
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    pushl %eax
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
@@ -28,12 +28,12 @@ define <1 x float> @test1(<1 x double> %
 
 define <2 x float> @test2(<2 x double> %x) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cvtpd2ps %xmm0, %xmm0
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvtpd2ps %xmm0, %xmm0
 ; AVX-NEXT:    retl
   %y = fptrunc <2 x double> %x to <2 x float>
@@ -42,14 +42,14 @@ define <2 x float> @test2(<2 x double> %
 
 define <4 x float> @test3(<4 x double> %x) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cvtpd2ps %xmm1, %xmm1
 ; CHECK-NEXT:    cvtpd2ps %xmm0, %xmm0
 ; CHECK-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvtpd2ps %ymm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retl
@@ -59,7 +59,7 @@ define <4 x float> @test3(<4 x double> %
 
 define <8 x float> @test4(<8 x double> %x) nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    cvtpd2ps %xmm1, %xmm1
 ; CHECK-NEXT:    cvtpd2ps %xmm0, %xmm0
@@ -71,7 +71,7 @@ define <8 x float> @test4(<8 x double> %
 ; CHECK-NEXT:    retl
 ;
 ; AVX-LABEL: test4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvtpd2ps %ymm0, %xmm0
 ; AVX-NEXT:    vcvtpd2ps %ymm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/fp-une-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-une-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-une-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-une-cmp.ll Mon Dec  4 09:18:51 2017
@@ -23,13 +23,13 @@
 
 define double @rdar_7859988(double %x, double %y) nounwind readnone optsize ssp {
 ; CHECK-LABEL: rdar_7859988:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mulsd %xmm1, %xmm0
 ; CHECK-NEXT:    xorpd %xmm1, %xmm1
 ; CHECK-NEXT:    ucomisd %xmm1, %xmm0
 ; CHECK-NEXT:    jne .LBB0_2
 ; CHECK-NEXT:    jp .LBB0_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    addsd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:  .LBB0_2: # %bb2
 ; CHECK-NEXT:    retq
@@ -50,7 +50,7 @@ bb2:
 
 define double @profile_metadata(double %x, double %y) {
 ; CHECK-LABEL: profile_metadata:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    mulsd %xmm1, %xmm0
 ; CHECK-NEXT:    xorpd %xmm1, %xmm1
 ; CHECK-NEXT:    ucomisd %xmm1, %xmm0
@@ -81,7 +81,7 @@ bb2:
 
 define void @foo(float %f) {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorps %xmm1, %xmm1
 ; CHECK-NEXT:    ucomiss %xmm1, %xmm0
 ; CHECK-NEXT:    jne .LBB2_2

Modified: llvm/trunk/test/CodeGen/X86/fp128-cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp128-cast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp128-cast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp128-cast.ll Mon Dec  4 09:18:51 2017
@@ -363,7 +363,7 @@ cleanup:
 
 define i1 @PR34866(i128 %x) {
 ; X64-LABEL: PR34866:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
 ; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
@@ -373,13 +373,13 @@ define i1 @PR34866(i128 %x) {
 ; X64-NEXT:    retq
 ;
 ; X64_NO_MMX-LABEL: PR34866:
-; X64_NO_MMX:       # BB#0:
+; X64_NO_MMX:       # %bb.0:
 ; X64_NO_MMX-NEXT:    orq %rsi, %rdi
 ; X64_NO_MMX-NEXT:    sete %al
 ; X64_NO_MMX-NEXT:    retq
 ;
 ; X32-LABEL: PR34866:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
@@ -394,7 +394,7 @@ define i1 @PR34866(i128 %x) {
 
 define i1 @PR34866_commute(i128 %x) {
 ; X64-LABEL: PR34866_commute:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
 ; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
@@ -404,13 +404,13 @@ define i1 @PR34866_commute(i128 %x) {
 ; X64-NEXT:    retq
 ;
 ; X64_NO_MMX-LABEL: PR34866_commute:
-; X64_NO_MMX:       # BB#0:
+; X64_NO_MMX:       # %bb.0:
 ; X64_NO_MMX-NEXT:    orq %rsi, %rdi
 ; X64_NO_MMX-NEXT:    sete %al
 ; X64_NO_MMX-NEXT:    retq
 ;
 ; X32-LABEL: PR34866_commute:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx

Modified: llvm/trunk/test/CodeGen/X86/fp128-i128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp128-i128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp128-i128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp128-i128.ll Mon Dec  4 09:18:51 2017
@@ -43,7 +43,7 @@
 ; }
 define void @TestUnionLD1(fp128 %s, i64 %n) #0 {
 ; CHECK-LABEL: TestUnionLD1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT:    movabsq $281474976710655, %rcx # imm = 0xFFFFFFFFFFFF
@@ -78,7 +78,7 @@ entry:
 ; }
 define fp128 @TestUnionLD2(fp128 %s) #0 {
 ; CHECK-LABEL: TestUnionLD2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
@@ -102,7 +102,7 @@ entry:
 ; }
 define fp128 @TestI128_1(fp128 %x) #0 {
 ; CHECK-LABEL: TestI128_1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $40, %rsp
 ; CHECK-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -140,11 +140,11 @@ entry:
 ; }
 define fp128 @TestI128_2(fp128 %x, fp128 %y) #0 {
 ; CHECK-LABEL: TestI128_2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    cmpq $0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    jns .LBB3_2
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:  .LBB3_2: # %entry
 ; CHECK-NEXT:    retq
@@ -168,14 +168,14 @@ entry:
 ; }
 define fp128 @TestI128_3(fp128 %x, i32* nocapture readnone %ex) #0 {
 ; CHECK-LABEL: TestI128_3:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $56, %rsp
 ; CHECK-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT:    movabsq $9223090561878065152, %rcx # imm = 0x7FFF000000000000
 ; CHECK-NEXT:    testq %rcx, %rax
 ; CHECK-NEXT:    je .LBB4_2
-; CHECK-NEXT:  # BB#1:
+; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT:    jmp .LBB4_3
 ; CHECK-NEXT:  .LBB4_2: # %if.then
@@ -224,7 +224,7 @@ if.end:
 ; }
 define fp128 @TestI128_4(fp128 %x) #0 {
 ; CHECK-LABEL: TestI128_4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $40, %rsp
 ; CHECK-NEXT:    movaps %xmm0, %xmm1
 ; CHECK-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
@@ -253,7 +253,7 @@ entry:
 ; }
 define void @TestShift128_2() #2 {
 ; CHECK-LABEL: TestShift128_2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq {{.*}}(%rip), %rax
 ; CHECK-NEXT:    shlq $32, %rax
 ; CHECK-NEXT:    movq {{.*}}(%rip), %rcx
@@ -272,7 +272,7 @@ entry:
 
 define fp128 @acosl(fp128 %x) #0 {
 ; CHECK-LABEL: acosl:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subq $40, %rsp
 ; CHECK-NEXT:    movaps %xmm0, %xmm1
 ; CHECK-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
@@ -294,11 +294,11 @@ entry:
 ; Compare i128 values and check i128 constants.
 define fp128 @TestComp(fp128 %x, fp128 %y) #0 {
 ; CHECK-LABEL: TestComp:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    cmpq $0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    jns .LBB8_2
-; CHECK-NEXT:  # BB#1: # %entry
+; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    movaps %xmm1, %xmm0
 ; CHECK-NEXT:  .LBB8_2: # %entry
 ; CHECK-NEXT:    retq
@@ -314,7 +314,7 @@ declare void @foo(fp128) #1
 ; Test logical operations on fp128 values.
 define fp128 @TestFABS_LD(fp128 %x) #0 {
 ; CHECK-LABEL: TestFABS_LD:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -329,7 +329,7 @@ declare fp128 @copysignl(fp128, fp128) #
 ; Test more complicated logical operations generated from copysignl.
 define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* byval nocapture readonly align 16 %z) #0 {
 ; CHECK-LABEL: TestCopySign:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    subq $40, %rsp
@@ -345,7 +345,7 @@ define void @TestCopySign({ fp128, fp128
 ; CHECK-NEXT:    callq __subtf3
 ; CHECK-NEXT:    testl %ebp, %ebp
 ; CHECK-NEXT:    jle .LBB10_1
-; CHECK-NEXT:  # BB#2: # %if.then
+; CHECK-NEXT:  # %bb.2: # %if.then
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movaps %xmm0, %xmm1
 ; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload

Modified: llvm/trunk/test/CodeGen/X86/fp128-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp128-select.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp128-select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp128-select.ll Mon Dec  4 09:18:51 2017
@@ -10,10 +10,10 @@
 
 define void @test_select(fp128* %p, fp128* %q, i1 zeroext %c) {
 ; MMX-LABEL: test_select:
-; MMX:       # BB#0:
+; MMX:       # %bb.0:
 ; MMX-NEXT:    testl %edx, %edx
 ; MMX-NEXT:    jne .LBB0_1
-; MMX-NEXT:  # BB#2:
+; MMX-NEXT:  # %bb.2:
 ; MMX-NEXT:    movaps {{.*}}(%rip), %xmm0
 ; MMX-NEXT:    movaps %xmm0, (%rsi)
 ; MMX-NEXT:    retq
@@ -23,7 +23,7 @@ define void @test_select(fp128* %p, fp12
 ; MMX-NEXT:    retq
 ;
 ; CHECK-LABEL: test_select:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testl %edx, %edx
 ; CHECK-NEXT:    cmovneq (%rdi), %rax

Modified: llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/gfni-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
 define <16 x i8> @test_gf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: test_gf2p8affineinvqb_128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    gf2p8affineinvqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xcf,0xc1,0x0b]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
  %1 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -14,7 +14,7 @@ define <16 x i8> @test_gf2p8affineinvqb_
 declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
 define <16 x i8> @test_gf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: test_gf2p8affineqb_128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    gf2p8affineqb $11, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xce,0xc1,0x0b]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
  %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -24,7 +24,7 @@ define <16 x i8> @test_gf2p8affineqb_128
 declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
 define <16 x i8> @test_gf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: test_gf2p8mulb_128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    gf2p8mulb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xcf,0xc1]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)

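Note for out-of-tree test owners: every hunk in the files above is the same mechanical rename of the auto-generated basic-block comment, from "# BB#N:" to "# %bb.N:" (with "##" rather than "#" as the comment leader on Darwin targets, as in the gfni-intrinsics.ll hunks just above). The sketch below shows one way such CHECK lines could be migrated with a plain regex; the script is hypothetical and not part of LLVM — in-tree tests are normally regenerated with utils/update_llc_test_checks.py instead.

#!/usr/bin/env python3
# Hypothetical migration helper (not part of LLVM): rewrites old-style
# basic-block comments in FileCheck expectations to the unified format.
# '# BB#0:' becomes '# %bb.0:'; the comment leader ('#' on ELF targets,
# '##' on Darwin) is preserved by the first capture group.
import re
import sys

OLD_BB = re.compile(r"(#+) BB#(\d+):")

def update(text):
    # Keep the leader and the block number; only the 'BB#N' spelling changes.
    return OLD_BB.sub(r"\1 %bb.\2:", text)

for path in sys.argv[1:]:
    with open(path) as f:
        text = f.read()
    with open(path, "w") as f:
        f.write(update(text))

Invoked as, e.g., "python3 rename-bb-comments.py test/CodeGen/X86/*.ll". The regex requires the trailing ':' so it rewrites only the block-header comments shown in these hunks and leaves labels such as .LBB0_1 untouched.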
Modified: llvm/trunk/test/CodeGen/X86/gpr-to-mask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/gpr-to-mask.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/gpr-to-mask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/gpr-to-mask.ll Mon Dec  4 09:18:51 2017
@@ -3,10 +3,10 @@
 
 define void @test_fcmp_storefloat(i1 %cond, float* %fptr, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
 ; CHECK-LABEL: test_fcmp_storefloat:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB0_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vcmpeqss %xmm3, %xmm2, %k1
 ; CHECK-NEXT:    jmp .LBB0_3
 ; CHECK-NEXT:  .LBB0_2: # %else
@@ -35,10 +35,10 @@ exit:
 
 define void @test_fcmp_storei1(i1 %cond, float* %fptr, i1* %iptr, float %f1, float %f2, float %f3, float %f4) {
 ; CHECK-LABEL: test_fcmp_storei1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB1_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    jmp .LBB1_3
 ; CHECK-NEXT:  .LBB1_2: # %else
@@ -67,10 +67,10 @@ exit:
 
 define void @test_load_add(i1 %cond, float* %fptr, i1* %iptr1, i1* %iptr2, float %f1, float %f2)  {
 ; CHECK-LABEL: test_load_add:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB2_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    kmovb (%rdx), %k0
 ; CHECK-NEXT:    kmovb (%rcx), %k1
 ; CHECK-NEXT:    kaddb %k1, %k0, %k1
@@ -103,10 +103,10 @@ exit:
 
 define void @test_load_i1(i1 %cond, float* %fptr, i1* %iptr1, i1* %iptr2, float %f1, float %f2)  {
 ; CHECK-LABEL: test_load_i1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB3_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    kmovb (%rdx), %k1
 ; CHECK-NEXT:    jmp .LBB3_3
 ; CHECK-NEXT:  .LBB3_2: # %else
@@ -135,10 +135,10 @@ exit:
 
 define void @test_loadi1_storei1(i1 %cond, i1* %iptr1, i1* %iptr2, i1* %iptr3)  {
 ; CHECK-LABEL: test_loadi1_storei1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB4_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    movb (%rsi), %al
 ; CHECK-NEXT:    jmp .LBB4_3
 ; CHECK-NEXT:  .LBB4_2: # %else
@@ -166,12 +166,12 @@ exit:
 
 define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; CHECK-LABEL: test_shl1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; CHECK-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB5_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    kmovb (%rsi), %k0
 ; CHECK-NEXT:    kaddb %k0, %k0, %k1
 ; CHECK-NEXT:    jmp .LBB5_3
@@ -204,12 +204,12 @@ exit:
 
 define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; CHECK-LABEL: test_shr1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; CHECK-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB6_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    movb (%rsi), %al
 ; CHECK-NEXT:    shrb %al
 ; CHECK-NEXT:    jmp .LBB6_3
@@ -243,12 +243,12 @@ exit:
 
 define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; CHECK-LABEL: test_shr2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; CHECK-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB7_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    kmovb (%rsi), %k0
 ; CHECK-NEXT:    kshiftrb $2, %k0, %k1
 ; CHECK-NEXT:    jmp .LBB7_3
@@ -281,12 +281,12 @@ exit:
 
 define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; CHECK-LABEL: test_shl:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; CHECK-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB8_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    kmovb (%rsi), %k0
 ; CHECK-NEXT:    kshiftlb $6, %k0, %k1
 ; CHECK-NEXT:    jmp .LBB8_3
@@ -319,14 +319,14 @@ exit:
 
 define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
 ; CHECK-LABEL: test_add:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; CHECK-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; CHECK-NEXT:    kmovb (%rsi), %k0
 ; CHECK-NEXT:    kmovb (%rdx), %k1
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB9_2
-; CHECK-NEXT:  # BB#1: # %if
+; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    kandb %k1, %k0, %k1
 ; CHECK-NEXT:    jmp .LBB9_3
 ; CHECK-NEXT:  .LBB9_2: # %else

Modified: llvm/trunk/test/CodeGen/X86/haddsub-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-2.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: hadd_ps_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_ps_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 0
@@ -35,12 +35,12 @@ define <4 x float> @hadd_ps_test1(<4 x f
 
 define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: hadd_ps_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_ps_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 2
@@ -64,12 +64,12 @@ define <4 x float> @hadd_ps_test2(<4 x f
 
 define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: hsub_ps_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_ps_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 0
@@ -93,12 +93,12 @@ define <4 x float> @hsub_ps_test1(<4 x f
 
 define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: hsub_ps_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_ps_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 2
@@ -122,7 +122,7 @@ define <4 x float> @hsub_ps_test2(<4 x f
 
 define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
 ; SSE3-LABEL: phadd_d_test1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movd %xmm0, %eax
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
 ; SSE3-NEXT:    movd %xmm2, %ecx
@@ -151,12 +151,12 @@ define <4 x i32> @phadd_d_test1(<4 x i32
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: phadd_d_test1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddd %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: phadd_d_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 0
@@ -180,7 +180,7 @@ define <4 x i32> @phadd_d_test1(<4 x i32
 
 define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
 ; SSE3-LABEL: phadd_d_test2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; SSE3-NEXT:    movd %xmm2, %eax
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -209,12 +209,12 @@ define <4 x i32> @phadd_d_test2(<4 x i32
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: phadd_d_test2:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddd %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: phadd_d_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 2
@@ -238,7 +238,7 @@ define <4 x i32> @phadd_d_test2(<4 x i32
 
 define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
 ; SSE3-LABEL: phsub_d_test1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movd %xmm0, %eax
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
 ; SSE3-NEXT:    movd %xmm2, %ecx
@@ -267,12 +267,12 @@ define <4 x i32> @phsub_d_test1(<4 x i32
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: phsub_d_test1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phsubd %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: phsub_d_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 0
@@ -296,7 +296,7 @@ define <4 x i32> @phsub_d_test1(<4 x i32
 
 define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
 ; SSE3-LABEL: phsub_d_test2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; SSE3-NEXT:    movd %xmm2, %eax
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -325,12 +325,12 @@ define <4 x i32> @phsub_d_test2(<4 x i32
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: phsub_d_test2:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phsubd %xmm1, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: phsub_d_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 2
@@ -354,12 +354,12 @@ define <4 x i32> @phsub_d_test2(<4 x i32
 
 define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: hadd_pd_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_pd_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %A, i32 0
@@ -375,12 +375,12 @@ define <2 x double> @hadd_pd_test1(<2 x
 
 define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: hadd_pd_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_pd_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %A, i32 1
@@ -396,12 +396,12 @@ define <2 x double> @hadd_pd_test2(<2 x
 
 define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: hsub_pd_test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_pd_test1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %A, i32 0
@@ -417,12 +417,12 @@ define <2 x double> @hsub_pd_test1(<2 x
 
 define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: hsub_pd_test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_pd_test2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %B, i32 0
@@ -438,14 +438,14 @@ define <2 x double> @hsub_pd_test2(<2 x
 
 define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
 ; SSE-LABEL: avx_vhadd_pd_test:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddpd %xmm1, %xmm0
 ; SSE-NEXT:    haddpd %xmm3, %xmm2
 ; SSE-NEXT:    movapd %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: avx_vhadd_pd_test:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -473,14 +473,14 @@ define <4 x double> @avx_vhadd_pd_test(<
 
 define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
 ; SSE-LABEL: avx_vhsub_pd_test:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubpd %xmm1, %xmm0
 ; SSE-NEXT:    hsubpd %xmm3, %xmm2
 ; SSE-NEXT:    movapd %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: avx_vhsub_pd_test:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-NEXT:    vhsubpd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -508,7 +508,7 @@ define <4 x double> @avx_vhsub_pd_test(<
 
 define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
 ; SSE3-LABEL: avx2_vphadd_d_test:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movd %xmm0, %ecx
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
 ; SSE3-NEXT:    movd %xmm4, %r8d
@@ -562,14 +562,14 @@ define <8 x i32> @avx2_vphadd_d_test(<8
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: avx2_vphadd_d_test:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddd %xmm1, %xmm0
 ; SSSE3-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-NEXT:    movdqa %xmm2, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: avx2_vphadd_d_test:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -578,7 +578,7 @@ define <8 x i32> @avx2_vphadd_d_test(<8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avx2_vphadd_d_test:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX2-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -622,7 +622,7 @@ define <8 x i32> @avx2_vphadd_d_test(<8
 
 define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
 ; SSE3-LABEL: avx2_vphadd_w_test:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pushq %rbp
 ; SSE3-NEXT:    .cfi_def_cfa_offset 16
 ; SSE3-NEXT:    pushq %r15
@@ -732,14 +732,14 @@ define <16 x i16> @avx2_vphadd_w_test(<1
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: avx2_vphadd_w_test:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddw %xmm1, %xmm0
 ; SSSE3-NEXT:    phaddw %xmm3, %xmm2
 ; SSSE3-NEXT:    movdqa %xmm2, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: avx2_vphadd_w_test:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vphaddw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
@@ -748,7 +748,7 @@ define <16 x i16> @avx2_vphadd_w_test(<1
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avx2_vphadd_w_test:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX2-NEXT:    vphaddw %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -826,7 +826,7 @@ define <16 x i16> @avx2_vphadd_w_test(<1
 
 define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
 ; SSE-LABEL: not_a_hsub_1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
 ; SSE-NEXT:    movd %xmm2, %ecx
@@ -855,7 +855,7 @@ define <4 x i32> @not_a_hsub_1(<4 x i32>
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_a_hsub_1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
 ; AVX-NEXT:    subl %ecx, %eax
@@ -894,7 +894,7 @@ define <4 x i32> @not_a_hsub_1(<4 x i32>
 
 define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: not_a_hsub_2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT:    movaps %xmm0, %xmm3
@@ -915,7 +915,7 @@ define <4 x float> @not_a_hsub_2(<4 x fl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_a_hsub_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
 ; AVX-NEXT:    vsubss %xmm3, %xmm2, %xmm2
@@ -951,7 +951,7 @@ define <4 x float> @not_a_hsub_2(<4 x fl
 
 define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: not_a_hsub_3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT:    subsd %xmm2, %xmm1
@@ -963,7 +963,7 @@ define <2 x double> @not_a_hsub_3(<2 x d
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_a_hsub_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
 ; AVX-NEXT:    vsubsd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
@@ -986,13 +986,13 @@ define <2 x double> @not_a_hsub_3(<2 x d
 
 define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
 ; SSE-LABEL: avx_vhadd_ps:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm2, %xmm0
 ; SSE-NEXT:    haddps %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: avx_vhadd_ps:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
@@ -1032,13 +1032,13 @@ define <8 x float> @avx_vhadd_ps(<8 x fl
 
 define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
 ; SSE-LABEL: avx_vhsub_ps:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubps %xmm2, %xmm0
 ; SSE-NEXT:    hsubps %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: avx_vhsub_ps:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
@@ -1078,13 +1078,13 @@ define <8 x float> @avx_vhsub_ps(<8 x fl
 
 define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
 ; SSE-LABEL: avx_hadd_pd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddpd %xmm2, %xmm0
 ; SSE-NEXT:    haddpd %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: avx_hadd_pd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x double> %a, i32 0
@@ -1108,13 +1108,13 @@ define <4 x double> @avx_hadd_pd(<4 x do
 
 define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
 ; SSE-LABEL: avx_hsub_pd:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    hsubpd %xmm2, %xmm0
 ; SSE-NEXT:    hsubpd %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: avx_hsub_pd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x double> %a, i32 0
@@ -1140,7 +1140,7 @@ define <4 x double> @avx_hsub_pd(<4 x do
 
 define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
 ; SSE3-LABEL: avx2_hadd_d:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movd %xmm0, %ecx
 ; SSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
 ; SSE3-NEXT:    movd %xmm4, %r8d
@@ -1194,13 +1194,13 @@ define <8 x i32> @avx2_hadd_d(<8 x i32>
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: avx2_hadd_d:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddd %xmm2, %xmm0
 ; SSSE3-NEXT:    phaddd %xmm3, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: avx2_hadd_d:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vphaddd %xmm2, %xmm3, %xmm2
@@ -1209,7 +1209,7 @@ define <8 x i32> @avx2_hadd_d(<8 x i32>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avx2_hadd_d:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
@@ -1249,7 +1249,7 @@ define <8 x i32> @avx2_hadd_d(<8 x i32>
 
 define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
 ; SSE3-LABEL: avx2_hadd_w:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    pushq %rbp
 ; SSE3-NEXT:    .cfi_def_cfa_offset 16
 ; SSE3-NEXT:    pushq %r15
@@ -1359,13 +1359,13 @@ define <16 x i16> @avx2_hadd_w(<16 x i16
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: avx2_hadd_w:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddw %xmm2, %xmm0
 ; SSSE3-NEXT:    phaddw %xmm3, %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: avx2_hadd_w:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT:    vphaddw %xmm2, %xmm3, %xmm2
@@ -1374,7 +1374,7 @@ define <16 x i16> @avx2_hadd_w(<16 x i16
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avx2_hadd_w:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphaddw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <16 x i16> %a, i32 0

Modified: llvm/trunk/test/CodeGen/X86/haddsub-shuf.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-shuf.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-shuf.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-shuf.ll Mon Dec  4 09:18:51 2017
@@ -7,12 +7,12 @@
 
 define <4 x float> @hadd_v4f32(<4 x float> %a) {
 ; SSSE3-LABEL: hadd_v4f32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a02 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 2>
@@ -24,12 +24,12 @@ define <4 x float> @hadd_v4f32(<4 x floa
 
 define <4 x float> @hsub_v4f32(<4 x float> %a) {
 ; SSSE3-LABEL: hsub_v4f32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    hsubps %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a02 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 2>
@@ -41,12 +41,12 @@ define <4 x float> @hsub_v4f32(<4 x floa
 
 define <2 x double> @hadd_v2f64(<2 x double> %a) {
 ; SSSE3-LABEL: hadd_v2f64:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    haddpd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a0 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -58,12 +58,12 @@ define <2 x double> @hadd_v2f64(<2 x dou
 
 define <2 x double> @hsub_v2f64(<2 x double> %a) {
 ; SSSE3-LABEL: hsub_v2f64:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    hsubpd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a0 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -75,12 +75,12 @@ define <2 x double> @hsub_v2f64(<2 x dou
 
 define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
 ; SSSE3-LABEL: hadd_v4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a02 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -92,12 +92,12 @@ define <4 x i32> @hadd_v4i32(<4 x i32> %
 
 define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
 ; SSSE3-LABEL: hsub_v4i32:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phsubd %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a02 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -109,12 +109,12 @@ define <4 x i32> @hsub_v4i32(<4 x i32> %
 
 define <8 x i16> @hadd_v8i16(<8 x i16> %a) {
 ; SSSE3-LABEL: hadd_v8i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phaddw %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hadd_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a0246 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -126,12 +126,12 @@ define <8 x i16> @hadd_v8i16(<8 x i16> %
 
 define <8 x i16> @hsub_v8i16(<8 x i16> %a) {
 ; SSSE3-LABEL: hsub_v8i16:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    phsubw %xmm0, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsub_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vphsubw %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a0246 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>

Modified: llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-undef.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-undef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-undef.ll Mon Dec  4 09:18:51 2017
@@ -7,12 +7,12 @@
 
 define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test1_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test1_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
@@ -32,12 +32,12 @@ define <4 x float> @test1_undef(<4 x flo
 
 define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test2_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
@@ -57,12 +57,12 @@ define <4 x float> @test2_undef(<4 x flo
 
 define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test3_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test3_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
@@ -82,13 +82,13 @@ define <4 x float> @test3_undef(<4 x flo
 
 define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test4_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test4_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -101,7 +101,7 @@ define <4 x float> @test4_undef(<4 x flo
 
 define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test5_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movaps %xmm0, %xmm1
 ; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT:    addsd %xmm0, %xmm1
@@ -109,7 +109,7 @@ define <2 x double> @test5_undef(<2 x do
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test5_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -122,12 +122,12 @@ define <2 x double> @test5_undef(<2 x do
 
 define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test6_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test6_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
@@ -143,12 +143,12 @@ define <4 x float> @test6_undef(<4 x flo
 
 define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test7_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test7_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %b, i32 0
@@ -164,7 +164,7 @@ define <4 x float> @test7_undef(<4 x flo
 
 define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test8_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT:    addss %xmm0, %xmm1
 ; SSE-NEXT:    movaps %xmm0, %xmm2
@@ -176,7 +176,7 @@ define <4 x float> @test8_undef(<4 x flo
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test8_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
@@ -197,12 +197,12 @@ define <4 x float> @test8_undef(<4 x flo
 
 define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: test9_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test9_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
@@ -218,12 +218,12 @@ define <4 x float> @test9_undef(<4 x flo
 
 define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
 ; SSE-LABEL: test10_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test10_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
@@ -239,7 +239,7 @@ define <8 x float> @test10_undef(<8 x fl
 
 define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
 ; SSE-LABEL: test11_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
@@ -248,7 +248,7 @@ define <8 x float> @test11_undef(<8 x fl
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test11_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
@@ -264,12 +264,12 @@ define <8 x float> @test11_undef(<8 x fl
 
 define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
 ; SSE-LABEL: test12_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test12_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
@@ -285,12 +285,12 @@ define <8 x float> @test12_undef(<8 x fl
 
 define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
 ; SSE-LABEL: test13_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    haddps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test13_undef:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -315,17 +315,17 @@ define <8 x float> @test13_undef(<8 x fl
 
 define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: test14_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    phaddd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test14_undef:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test14_undef:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
@@ -344,7 +344,7 @@ define <8 x i32> @test14_undef(<8 x i32>
 ; integer horizontal adds instead of two scalar adds followed by vector inserts.
 define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: test15_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; SSE-NEXT:    movd %xmm0, %ecx
@@ -359,7 +359,7 @@ define <8 x i32> @test15_undef(<8 x i32>
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test15_undef:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %xmm0, %eax
 ; AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
 ; AVX1-NEXT:    addl %eax, %ecx
@@ -374,7 +374,7 @@ define <8 x i32> @test15_undef(<8 x i32>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test15_undef:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
@@ -390,17 +390,17 @@ define <8 x i32> @test15_undef(<8 x i32>
 
 define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: test16_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    phaddd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test16_undef:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test16_undef:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
@@ -416,18 +416,18 @@ define <8 x i32> @test16_undef(<8 x i32>
 
 define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: test17_undef:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    phaddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test17_undef:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test17_undef:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: haddpd1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddpd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddpd1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
@@ -20,12 +20,12 @@ define <2 x double> @haddpd1(<2 x double
 
 define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: haddpd2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddpd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddpd2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 2>
@@ -36,12 +36,12 @@ define <2 x double> @haddpd2(<2 x double
 
 define <2 x double> @haddpd3(<2 x double> %x) {
 ; SSE3-LABEL: haddpd3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddpd %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddpd3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -52,12 +52,12 @@ define <2 x double> @haddpd3(<2 x double
 
 define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
 ; SSE3-LABEL: haddps1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -68,12 +68,12 @@ define <4 x float> @haddps1(<4 x float>
 
 define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
 ; SSE3-LABEL: haddps2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
@@ -84,12 +84,12 @@ define <4 x float> @haddps2(<4 x float>
 
 define <4 x float> @haddps3(<4 x float> %x) {
 ; SSE3-LABEL: haddps3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -100,12 +100,12 @@ define <4 x float> @haddps3(<4 x float>
 
 define <4 x float> @haddps4(<4 x float> %x) {
 ; SSE3-LABEL: haddps4:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -116,12 +116,12 @@ define <4 x float> @haddps4(<4 x float>
 
 define <4 x float> @haddps5(<4 x float> %x) {
 ; SSE3-LABEL: haddps5:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
@@ -132,12 +132,12 @@ define <4 x float> @haddps5(<4 x float>
 
 define <4 x float> @haddps6(<4 x float> %x) {
 ; SSE3-LABEL: haddps6:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -148,12 +148,12 @@ define <4 x float> @haddps6(<4 x float>
 
 define <4 x float> @haddps7(<4 x float> %x) {
 ; SSE3-LABEL: haddps7:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
@@ -164,12 +164,12 @@ define <4 x float> @haddps7(<4 x float>
 
 define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: hsubpd1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubpd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsubpd1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
@@ -180,12 +180,12 @@ define <2 x double> @hsubpd1(<2 x double
 
 define <2 x double> @hsubpd2(<2 x double> %x) {
 ; SSE3-LABEL: hsubpd2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubpd %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsubpd2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
@@ -196,12 +196,12 @@ define <2 x double> @hsubpd2(<2 x double
 
 define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
 ; SSE3-LABEL: hsubps1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubps %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsubps1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -212,12 +212,12 @@ define <4 x float> @hsubps1(<4 x float>
 
 define <4 x float> @hsubps2(<4 x float> %x) {
 ; SSE3-LABEL: hsubps2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsubps2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
@@ -228,12 +228,12 @@ define <4 x float> @hsubps2(<4 x float>
 
 define <4 x float> @hsubps3(<4 x float> %x) {
 ; SSE3-LABEL: hsubps3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsubps3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
@@ -244,12 +244,12 @@ define <4 x float> @hsubps3(<4 x float>
 
 define <4 x float> @hsubps4(<4 x float> %x) {
 ; SSE3-LABEL: hsubps4:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: hsubps4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -260,13 +260,13 @@ define <4 x float> @hsubps4(<4 x float>
 
 define <8 x float> @vhaddps1(<8 x float> %x, <8 x float> %y) {
 ; SSE3-LABEL: vhaddps1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm2, %xmm0
 ; SSE3-NEXT:    haddps %xmm3, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhaddps1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -277,13 +277,13 @@ define <8 x float> @vhaddps1(<8 x float>
 
 define <8 x float> @vhaddps2(<8 x float> %x, <8 x float> %y) {
 ; SSE3-LABEL: vhaddps2:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm2, %xmm0
 ; SSE3-NEXT:    haddps %xmm3, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhaddps2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
@@ -294,13 +294,13 @@ define <8 x float> @vhaddps2(<8 x float>
 
 define <8 x float> @vhaddps3(<8 x float> %x) {
 ; SSE3-LABEL: vhaddps3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    haddps %xmm1, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhaddps3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -311,13 +311,13 @@ define <8 x float> @vhaddps3(<8 x float>
 
 define <8 x float> @vhsubps1(<8 x float> %x, <8 x float> %y) {
 ; SSE3-LABEL: vhsubps1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubps %xmm2, %xmm0
 ; SSE3-NEXT:    hsubps %xmm3, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhsubps1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -328,13 +328,13 @@ define <8 x float> @vhsubps1(<8 x float>
 
 define <8 x float> @vhsubps3(<8 x float> %x) {
 ; SSE3-LABEL: vhsubps3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubps %xmm0, %xmm0
 ; SSE3-NEXT:    hsubps %xmm1, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhsubps3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -345,13 +345,13 @@ define <8 x float> @vhsubps3(<8 x float>
 
 define <4 x double> @vhaddpd1(<4 x double> %x, <4 x double> %y) {
 ; SSE3-LABEL: vhaddpd1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddpd %xmm2, %xmm0
 ; SSE3-NEXT:    haddpd %xmm3, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhaddpd1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -362,13 +362,13 @@ define <4 x double> @vhaddpd1(<4 x doubl
 
 define <4 x double> @vhsubpd1(<4 x double> %x, <4 x double> %y) {
 ; SSE3-LABEL: vhsubpd1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    hsubpd %xmm2, %xmm0
 ; SSE3-NEXT:    hsubpd %xmm3, %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: vhsubpd1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -379,12 +379,12 @@ define <4 x double> @vhsubpd1(<4 x doubl
 
 define <2 x float> @haddps_v2f32(<4 x float> %v0) {
 ; SSE3-LABEL: haddps_v2f32:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    haddps %xmm0, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; AVX-LABEL: haddps_v2f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %v0.0 = extractelement <4 x float> %v0, i32 0

Modified: llvm/trunk/test/CodeGen/X86/half.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/half.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/half.ll (original)
+++ llvm/trunk/test/CodeGen/X86/half.ll Mon Dec  4 09:18:51 2017
@@ -10,19 +10,19 @@
 
 define void @test_load_store(half* %in, half* %out) #0 {
 ; BWON-LABEL: test_load_store:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    movzwl (%rdi), %eax
 ; BWON-NEXT:    movw %ax, (%rsi)
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: test_load_store:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    movw (%rdi), %ax
 ; BWOFF-NEXT:    movw %ax, (%rsi)
 ; BWOFF-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_load_store:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-I686-NEXT:    movw (%ecx), %cx
@@ -35,17 +35,17 @@ define void @test_load_store(half* %in,
 
 define i16 @test_bitcast_from_half(half* %addr) #0 {
 ; BWON-LABEL: test_bitcast_from_half:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    movzwl (%rdi), %eax
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: test_bitcast_from_half:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    movw (%rdi), %ax
 ; BWOFF-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_bitcast_from_half:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movw (%eax), %ax
 ; CHECK-I686-NEXT:    retl
@@ -56,12 +56,12 @@ define i16 @test_bitcast_from_half(half*
 
 define void @test_bitcast_to_half(half* %addr, i16 %in) #0 {
 ; CHECK-LABEL: test_bitcast_to_half:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movw %si, (%rdi)
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_bitcast_to_half:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    movw {{[0-9]+}}(%esp), %ax
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-I686-NEXT:    movw %ax, (%ecx)
@@ -73,19 +73,19 @@ define void @test_bitcast_to_half(half*
 
 define float @test_extend32(half* %addr) #0 {
 ; CHECK-LIBCALL-LABEL: test_extend32:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    movzwl (%rdi), %edi
 ; CHECK-LIBCALL-NEXT:    jmp __gnu_h2f_ieee # TAILCALL
 ;
 ; BWON-F16C-LABEL: test_extend32:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl (%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_extend32:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    subl $12, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movzwl (%eax), %eax
@@ -100,7 +100,7 @@ define float @test_extend32(half* %addr)
 
 define double @test_extend64(half* %addr) #0 {
 ; CHECK-LIBCALL-LABEL: test_extend64:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rax
 ; CHECK-LIBCALL-NEXT:    movzwl (%rdi), %edi
 ; CHECK-LIBCALL-NEXT:    callq __gnu_h2f_ieee
@@ -109,7 +109,7 @@ define double @test_extend64(half* %addr
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_extend64:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl (%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -117,7 +117,7 @@ define double @test_extend64(half* %addr
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_extend64:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    subl $12, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movzwl (%eax), %eax
@@ -132,7 +132,7 @@ define double @test_extend64(half* %addr
 
 define void @test_trunc32(float %in, half* %addr) #0 {
 ; CHECK-LIBCALL-LABEL: test_trunc32:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    movq %rdi, %rbx
 ; CHECK-LIBCALL-NEXT:    callq __gnu_f2h_ieee
@@ -141,14 +141,14 @@ define void @test_trunc32(float %in, hal
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_trunc32:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vmovd %xmm0, %eax
 ; BWON-F16C-NEXT:    movw %ax, (%rdi)
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_trunc32:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %esi
 ; CHECK-I686-NEXT:    subl $8, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -166,7 +166,7 @@ define void @test_trunc32(float %in, hal
 
 define void @test_trunc64(double %in, half* %addr) #0 {
 ; CHECK-LABEL: test_trunc64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    callq __truncdfhf2
@@ -175,7 +175,7 @@ define void @test_trunc64(double %in, ha
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_trunc64:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %esi
 ; CHECK-I686-NEXT:    subl $8, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -193,7 +193,7 @@ define void @test_trunc64(double %in, ha
 
 define i64 @test_fptosi_i64(half* %p) #0 {
 ; CHECK-LIBCALL-LABEL: test_fptosi_i64:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rax
 ; CHECK-LIBCALL-NEXT:    movzwl (%rdi), %edi
 ; CHECK-LIBCALL-NEXT:    callq __gnu_h2f_ieee
@@ -202,7 +202,7 @@ define i64 @test_fptosi_i64(half* %p) #0
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_fptosi_i64:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl (%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -210,7 +210,7 @@ define i64 @test_fptosi_i64(half* %p) #0
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_fptosi_i64:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    subl $12, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movzwl (%eax), %eax
@@ -227,7 +227,7 @@ define i64 @test_fptosi_i64(half* %p) #0
 
 define void @test_sitofp_i64(i64 %a, half* %p) #0 {
 ; CHECK-LIBCALL-LABEL: test_sitofp_i64:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    movq %rsi, %rbx
 ; CHECK-LIBCALL-NEXT:    cvtsi2ssq %rdi, %xmm0
@@ -237,7 +237,7 @@ define void @test_sitofp_i64(i64 %a, hal
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_sitofp_i64:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vmovd %xmm0, %eax
@@ -245,7 +245,7 @@ define void @test_sitofp_i64(i64 %a, hal
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_sitofp_i64:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %esi
 ; CHECK-I686-NEXT:    subl $24, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -267,7 +267,7 @@ define void @test_sitofp_i64(i64 %a, hal
 
 define i64 @test_fptoui_i64(half* %p) #0 {
 ; CHECK-LIBCALL-LABEL: test_fptoui_i64:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rax
 ; CHECK-LIBCALL-NEXT:    movzwl (%rdi), %edi
 ; CHECK-LIBCALL-NEXT:    callq __gnu_h2f_ieee
@@ -284,7 +284,7 @@ define i64 @test_fptoui_i64(half* %p) #0
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_fptoui_i64:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl (%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -299,7 +299,7 @@ define i64 @test_fptoui_i64(half* %p) #0
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_fptoui_i64:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    subl $12, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movzwl (%eax), %eax
@@ -316,12 +316,12 @@ define i64 @test_fptoui_i64(half* %p) #0
 
 define void @test_uitofp_i64(i64 %a, half* %p) #0 {
 ; CHECK-LIBCALL-LABEL: test_uitofp_i64:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    movq %rsi, %rbx
 ; CHECK-LIBCALL-NEXT:    testq %rdi, %rdi
 ; CHECK-LIBCALL-NEXT:    js .LBB10_1
-; CHECK-LIBCALL-NEXT:  # BB#2:
+; CHECK-LIBCALL-NEXT:  # %bb.2:
 ; CHECK-LIBCALL-NEXT:    cvtsi2ssq %rdi, %xmm0
 ; CHECK-LIBCALL-NEXT:    jmp .LBB10_3
 ; CHECK-LIBCALL-NEXT:  .LBB10_1:
@@ -338,10 +338,10 @@ define void @test_uitofp_i64(i64 %a, hal
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_uitofp_i64:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    testq %rdi, %rdi
 ; BWON-F16C-NEXT:    js .LBB10_1
-; BWON-F16C-NEXT:  # BB#2:
+; BWON-F16C-NEXT:  # %bb.2:
 ; BWON-F16C-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    jmp .LBB10_3
 ; BWON-F16C-NEXT:  .LBB10_1:
@@ -358,7 +358,7 @@ define void @test_uitofp_i64(i64 %a, hal
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_uitofp_i64:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %esi
 ; CHECK-I686-NEXT:    subl $24, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -382,7 +382,7 @@ define void @test_uitofp_i64(i64 %a, hal
 
 define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL-LABEL: test_extend32_vec4:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    subq $48, %rsp
 ; CHECK-LIBCALL-NEXT:    movq %rdi, %rbx
@@ -408,7 +408,7 @@ define <4 x float> @test_extend32_vec4(<
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_extend32_vec4:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl 6(%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -427,7 +427,7 @@ define <4 x float> @test_extend32_vec4(<
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_extend32_vec4:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %esi
 ; CHECK-I686-NEXT:    subl $56, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -468,7 +468,7 @@ define <4 x float> @test_extend32_vec4(<
 
 define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL-LABEL: test_extend64_vec4:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    subq $16, %rsp
 ; CHECK-LIBCALL-NEXT:    movq %rdi, %rbx
@@ -500,7 +500,7 @@ define <4 x double> @test_extend64_vec4(
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_extend64_vec4:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl (%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -523,7 +523,7 @@ define <4 x double> @test_extend64_vec4(
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_extend64_vec4:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %esi
 ; CHECK-I686-NEXT:    subl $88, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -563,7 +563,7 @@ define <4 x double> @test_extend64_vec4(
 
 define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
 ; BWON-NOF16C-LABEL: test_trunc32_vec4:
-; BWON-NOF16C:       # BB#0:
+; BWON-NOF16C:       # %bb.0:
 ; BWON-NOF16C-NEXT:    pushq %rbp
 ; BWON-NOF16C-NEXT:    pushq %r15
 ; BWON-NOF16C-NEXT:    pushq %r14
@@ -596,7 +596,7 @@ define void @test_trunc32_vec4(<4 x floa
 ; BWON-NOF16C-NEXT:    retq
 ;
 ; BWOFF-LABEL: test_trunc32_vec4:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    pushq %rbp
 ; BWOFF-NEXT:    pushq %r15
 ; BWOFF-NEXT:    pushq %r14
@@ -629,7 +629,7 @@ define void @test_trunc32_vec4(<4 x floa
 ; BWOFF-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_trunc32_vec4:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vmovd %xmm1, %eax
@@ -648,7 +648,7 @@ define void @test_trunc32_vec4(<4 x floa
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_trunc32_vec4:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %ebp
 ; CHECK-I686-NEXT:    pushl %ebx
 ; CHECK-I686-NEXT:    pushl %edi
@@ -691,7 +691,7 @@ define void @test_trunc32_vec4(<4 x floa
 
 define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
 ; BWON-NOF16C-LABEL: test_trunc64_vec4:
-; BWON-NOF16C:       # BB#0:
+; BWON-NOF16C:       # %bb.0:
 ; BWON-NOF16C-NEXT:    pushq %rbp
 ; BWON-NOF16C-NEXT:    pushq %r15
 ; BWON-NOF16C-NEXT:    pushq %r14
@@ -724,7 +724,7 @@ define void @test_trunc64_vec4(<4 x doub
 ; BWON-NOF16C-NEXT:    retq
 ;
 ; BWOFF-LABEL: test_trunc64_vec4:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    pushq %rbp
 ; BWOFF-NEXT:    pushq %r15
 ; BWOFF-NEXT:    pushq %r14
@@ -757,7 +757,7 @@ define void @test_trunc64_vec4(<4 x doub
 ; BWOFF-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_trunc64_vec4:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    pushq %rbp
 ; BWON-F16C-NEXT:    pushq %r15
 ; BWON-F16C-NEXT:    pushq %r14
@@ -795,7 +795,7 @@ define void @test_trunc64_vec4(<4 x doub
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_trunc64_vec4:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    pushl %ebp
 ; CHECK-I686-NEXT:    pushl %ebx
 ; CHECK-I686-NEXT:    pushl %edi
@@ -840,7 +840,7 @@ declare float @test_floatret();
 ; fp_round and the subsequent fptrunc from float to half.
 define half @test_f80trunc_nodagcombine() #0 {
 ; CHECK-LIBCALL-LABEL: test_f80trunc_nodagcombine:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rax
 ; CHECK-LIBCALL-NEXT:    callq test_floatret
 ; CHECK-LIBCALL-NEXT:    callq __gnu_f2h_ieee
@@ -850,7 +850,7 @@ define half @test_f80trunc_nodagcombine(
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_f80trunc_nodagcombine:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    pushq %rax
 ; BWON-F16C-NEXT:    callq test_floatret
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
@@ -859,7 +859,7 @@ define half @test_f80trunc_nodagcombine(
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_f80trunc_nodagcombine:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    subl $12, %esp
 ; CHECK-I686-NEXT:    calll test_floatret
 ; CHECK-I686-NEXT:    fstps (%esp)
@@ -879,7 +879,7 @@ define half @test_f80trunc_nodagcombine(
 
 define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
 ; CHECK-LIBCALL-LABEL: test_sitofp_fadd_i32:
-; CHECK-LIBCALL:       # BB#0:
+; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    subq $16, %rsp
 ; CHECK-LIBCALL-NEXT:    movl %edi, %ebx
@@ -896,7 +896,7 @@ define float @test_sitofp_fadd_i32(i32 %
 ; CHECK-LIBCALL-NEXT:    retq
 ;
 ; BWON-F16C-LABEL: test_sitofp_fadd_i32:
-; BWON-F16C:       # BB#0:
+; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    movswl (%rsi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -907,7 +907,7 @@ define float @test_sitofp_fadd_i32(i32 %
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_sitofp_fadd_i32:
-; CHECK-I686:       # BB#0:
+; CHECK-I686:       # %bb.0:
 ; CHECK-I686-NEXT:    subl $28, %esp
 ; CHECK-I686-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movzwl (%eax), %eax

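All of the hunks above apply one mechanical substitution: the basic-block comment "BB#N" becomes "%bb.N", both at block labels ("# %bb.0:") and at fall-through references ("# %bb.2:"), and independent of the assembler comment leader ("#" on ELF targets, "##" on Darwin, as in the horizontal-reduce tests below). Out-of-tree FileCheck tests that match llc output can be migrated the same way. What follows is a minimal sketch under stated assumptions: the script name, the regex, and the ';'-line filter are illustrative choices, not part of this commit, and in-tree tests like these are normally regenerated with utils/update_llc_test_checks.py rather than edited by hand.

    #!/usr/bin/env python3
    # migrate_bb_refs.py (hypothetical helper, not part of this commit):
    # rewrite old "BB#N" basic-block comments in the FileCheck lines of
    # .ll tests to the new "%bb.N" format.
    import re
    import sys

    # Matches "BB#0" inside "# BB#0:" or "## BB#0:"; label names such as
    # ".LBB10_1" have no '#' and are left untouched.
    OLD_BB_REF = re.compile(r"BB#(\d+)")

    def migrate(path):
        with open(path) as f:
            lines = f.readlines()
        # Only touch FileCheck comment lines (';' prefix) so the IR
        # bodies of the tests are left alone.
        rewritten = [OLD_BB_REF.sub(r"%bb.\1", ln)
                     if ln.lstrip().startswith(";") else ln
                     for ln in lines]
        with open(path, "w") as f:
            f.writelines(rewritten)

    if __name__ == "__main__":
        for p in sys.argv[1:]:
            migrate(p)

Usage would be along the lines of "python3 migrate_bb_refs.py test/CodeGen/X86/*.ll"; filtering on the ';' prefix keeps the substitution confined to CHECK comments, which is where the old format appears in these tests.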
Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 
 define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X86-SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
@@ -48,7 +48,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; X86-AVX-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -57,7 +57,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -78,7 +78,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X64-SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
@@ -87,7 +87,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -95,7 +95,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; X64-AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -103,7 +103,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX512-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovq %xmm0, %rax
@@ -117,7 +117,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 
 define i32 @test_reduce_v4i32(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
@@ -134,7 +134,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxsd %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -143,7 +143,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -152,7 +152,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
@@ -169,7 +169,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxsd %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -178,7 +178,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -197,7 +197,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 
 define i16 @test_reduce_v8i16(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -210,7 +210,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
@@ -220,7 +220,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
@@ -230,7 +230,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -243,7 +243,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
@@ -253,7 +253,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
@@ -276,7 +276,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 
 define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
@@ -308,7 +308,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxsb %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -324,7 +324,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -338,7 +338,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
@@ -370,7 +370,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxsb %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -386,7 +386,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -420,7 +420,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 
 define i64 @test_reduce_v4i64(<4 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -458,7 +458,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
 ; X86-SSE42-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
@@ -471,7 +471,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
@@ -489,7 +489,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; X86-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -502,7 +502,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -538,7 +538,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
 ; X64-SSE42-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
@@ -550,7 +550,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
@@ -567,7 +567,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; X64-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -579,7 +579,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -599,7 +599,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 
 define i32 @test_reduce_v8i32(<8 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -621,7 +621,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsd %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxsd %xmm0, %xmm1
@@ -631,7 +631,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -643,7 +643,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -655,7 +655,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
 ; X64-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -677,7 +677,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsd %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxsd %xmm0, %xmm1
@@ -687,7 +687,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -699,7 +699,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -711,7 +711,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -736,7 +736,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 
 define i16 @test_reduce_v16i16(<16 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pmaxsw %xmm1, %xmm0
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
@@ -750,7 +750,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
@@ -761,7 +761,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -774,7 +774,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -787,7 +787,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pmaxsw %xmm1, %xmm0
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pmaxsw %xmm0, %xmm1
@@ -801,7 +801,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
@@ -812,7 +812,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -825,7 +825,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -838,7 +838,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
@@ -867,7 +867,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 
 define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -904,7 +904,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxsb %xmm0, %xmm1
@@ -921,7 +921,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -938,7 +938,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -955,7 +955,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
 ; X64-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -992,7 +992,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxsb %xmm0, %xmm1
@@ -1009,7 +1009,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1026,7 +1026,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1043,7 +1043,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1083,7 +1083,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 
 define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    subl $28, %esp
 ; X86-SSE2-NEXT:    .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT:    movdqa %xmm3, %xmm5
@@ -1158,7 +1158,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm4
 ; X86-SSE42-NEXT:    movdqa %xmm4, %xmm5
 ; X86-SSE42-NEXT:    pcmpgtq %xmm2, %xmm5
@@ -1179,7 +1179,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1203,7 +1203,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; X86-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; X86-AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
@@ -1218,7 +1218,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
 ; X64-SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1284,7 +1284,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm4
 ; X64-SSE42-NEXT:    movdqa %xmm4, %xmm5
 ; X64-SSE42-NEXT:    pcmpgtq %xmm2, %xmm5
@@ -1304,7 +1304,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1327,7 +1327,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; X64-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; X64-AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
@@ -1341,7 +1341,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1366,7 +1366,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 
 define i32 @test_reduce_v16i32(<16 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; X86-SSE2-NEXT:    pcmpgtd %xmm2, %xmm4
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm5
@@ -1398,7 +1398,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsd %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxsd %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxsd %xmm1, %xmm0
@@ -1410,7 +1410,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1425,7 +1425,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
@@ -1438,7 +1438,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; X64-SSE2-NEXT:    pcmpgtd %xmm2, %xmm4
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm5
@@ -1470,7 +1470,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsd %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxsd %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxsd %xmm1, %xmm0
@@ -1482,7 +1482,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
@@ -1497,7 +1497,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
@@ -1510,7 +1510,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1540,7 +1540,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 
 define i16 @test_reduce_v32i16(<32 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pmaxsw %xmm3, %xmm1
 ; X86-SSE2-NEXT:    pmaxsw %xmm2, %xmm0
 ; X86-SSE2-NEXT:    pmaxsw %xmm1, %xmm0
@@ -1556,7 +1556,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsw %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxsw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
@@ -1569,7 +1569,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
@@ -1585,7 +1585,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
@@ -1599,7 +1599,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pmaxsw %xmm3, %xmm1
 ; X64-SSE2-NEXT:    pmaxsw %xmm2, %xmm0
 ; X64-SSE2-NEXT:    pmaxsw %xmm1, %xmm0
@@ -1615,7 +1615,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsw %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxsw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxsw %xmm1, %xmm0
@@ -1628,7 +1628,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
@@ -1644,7 +1644,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0
@@ -1658,7 +1658,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1692,7 +1692,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 
 define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; X86-SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm5
@@ -1739,7 +1739,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxsb %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxsb %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
@@ -1758,7 +1758,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
@@ -1778,7 +1778,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
@@ -1796,7 +1796,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; X64-SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm5
@@ -1843,7 +1843,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxsb %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxsb %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxsb %xmm1, %xmm0
@@ -1862,7 +1862,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
@@ -1882,7 +1882,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm0
@@ -1900,7 +1900,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 
 define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X86-SSE42-NEXT:    movdqa %xmm2, %xmm0
@@ -49,7 +49,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X86-AVX-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -58,7 +58,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -79,7 +79,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X64-SSE42-NEXT:    movdqa %xmm2, %xmm0
@@ -89,7 +89,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -97,7 +97,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X64-AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
@@ -105,7 +105,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX512-NEXT:    vpminsq %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovq %xmm0, %rax
@@ -119,7 +119,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 
 define i32 @test_reduce_v4i32(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
@@ -136,7 +136,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminsd %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -145,7 +145,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -154,7 +154,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
@@ -171,7 +171,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminsd %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -180,7 +180,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -199,7 +199,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 
 define i16 @test_reduce_v8i16(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pminsw %xmm0, %xmm1
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -212,7 +212,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
@@ -222,7 +222,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
@@ -232,7 +232,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pminsw %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -245,7 +245,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
@@ -255,7 +255,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
@@ -278,7 +278,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 
 define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
@@ -310,7 +310,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminsb %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -326,7 +326,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -340,7 +340,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
@@ -372,7 +372,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminsb %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -388,7 +388,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -422,7 +422,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 
 define i64 @test_reduce_v4i64(<4 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -460,7 +460,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE42-NEXT:    movdqa %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
@@ -474,7 +474,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
@@ -492,7 +492,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; X86-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -505,7 +505,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -541,7 +541,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE42-NEXT:    movdqa %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
@@ -554,7 +554,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
@@ -571,7 +571,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; X64-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -583,7 +583,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsq %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -603,7 +603,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 
 define i32 @test_reduce_v8i32(<8 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
 ; X86-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -625,7 +625,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsd %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminsd %xmm0, %xmm1
@@ -635,7 +635,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -647,7 +647,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -659,7 +659,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
 ; X64-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -681,7 +681,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsd %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminsd %xmm0, %xmm1
@@ -691,7 +691,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -703,7 +703,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -715,7 +715,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -740,7 +740,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 
 define i16 @test_reduce_v16i16(<16 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pminsw %xmm1, %xmm0
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pminsw %xmm0, %xmm1
@@ -754,7 +754,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
@@ -765,7 +765,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -778,7 +778,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -791,7 +791,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pminsw %xmm1, %xmm0
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pminsw %xmm0, %xmm1
@@ -805,7 +805,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
@@ -816,7 +816,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -829,7 +829,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -842,7 +842,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
@@ -871,7 +871,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 
 define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
 ; X86-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -908,7 +908,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsb %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminsb %xmm0, %xmm1
@@ -925,7 +925,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -942,7 +942,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -959,7 +959,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; X64-SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
 ; X64-SSE2-NEXT:    pand %xmm2, %xmm0
@@ -996,7 +996,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsb %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminsb %xmm0, %xmm1
@@ -1013,7 +1013,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1030,7 +1030,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1047,7 +1047,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1087,7 +1087,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 
 define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    subl $28, %esp
 ; X86-SSE2-NEXT:    .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT:    movdqa %xmm2, %xmm6
@@ -1160,7 +1160,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm4
 ; X86-SSE42-NEXT:    movdqa %xmm3, %xmm5
 ; X86-SSE42-NEXT:    pcmpgtq %xmm1, %xmm5
@@ -1181,7 +1181,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1205,7 +1205,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; X86-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; X86-AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
@@ -1220,7 +1220,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm5
 ; X64-SSE2-NEXT:    pxor %xmm9, %xmm5
@@ -1286,7 +1286,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm4
 ; X64-SSE42-NEXT:    movdqa %xmm3, %xmm5
 ; X64-SSE42-NEXT:    pcmpgtq %xmm1, %xmm5
@@ -1306,7 +1306,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1329,7 +1329,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; X64-AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; X64-AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
@@ -1343,7 +1343,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminsq %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1368,7 +1368,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 
 define i32 @test_reduce_v16i32(<16 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; X86-SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
 ; X86-SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -1400,7 +1400,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsd %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminsd %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminsd %xmm1, %xmm0
@@ -1412,7 +1412,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm2
@@ -1427,7 +1427,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
@@ -1440,7 +1440,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; X64-SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
 ; X64-SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -1472,7 +1472,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsd %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminsd %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminsd %xmm1, %xmm0
@@ -1484,7 +1484,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm2
@@ -1499,7 +1499,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
@@ -1512,7 +1512,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminsd %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1542,7 +1542,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 
 define i16 @test_reduce_v32i16(<32 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pminsw %xmm3, %xmm1
 ; X86-SSE2-NEXT:    pminsw %xmm2, %xmm0
 ; X86-SSE2-NEXT:    pminsw %xmm1, %xmm0
@@ -1558,7 +1558,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsw %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminsw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminsw %xmm1, %xmm0
@@ -1571,7 +1571,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
@@ -1587,7 +1587,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
@@ -1601,7 +1601,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pminsw %xmm3, %xmm1
 ; X64-SSE2-NEXT:    pminsw %xmm2, %xmm0
 ; X64-SSE2-NEXT:    pminsw %xmm1, %xmm0
@@ -1617,7 +1617,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsw %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminsw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminsw %xmm1, %xmm0
@@ -1630,7 +1630,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm2
@@ -1646,7 +1646,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsw %xmm1, %xmm0, %xmm0
@@ -1660,7 +1660,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1694,7 +1694,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 
 define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; X86-SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
 ; X86-SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -1741,7 +1741,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminsb %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminsb %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminsb %xmm1, %xmm0
@@ -1760,7 +1760,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
@@ -1780,7 +1780,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
@@ -1798,7 +1798,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; X64-SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
 ; X64-SSE2-NEXT:    movdqa %xmm2, %xmm5
@@ -1845,7 +1845,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminsb %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminsb %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminsb %xmm1, %xmm0
@@ -1864,7 +1864,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm2
@@ -1884,7 +1884,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm0
@@ -1902,7 +1902,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminsb %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
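
For reference, the diff context elides the IR bodies of these reduction
tests. They all follow the same halving pattern: shuffle the upper half
down, compare, select, and repeat until one lane remains. A minimal
hand-written sketch (illustrative only, not copied from the test file)
of a single signed-min step on <2 x i64>:

define i64 @smin_v2i64_sketch(<2 x i64> %a0) {
  ; swap the two lanes so each lane faces its partner
  %shuf = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
  ; signed less-than, then keep the smaller element per lane
  %cmp = icmp slt <2 x i64> %a0, %shuf
  %min = select <2 x i1> %cmp, <2 x i64> %a0, <2 x i64> %shuf
  ; both lanes now hold the minimum; read lane 0
  %res = extractelement <2 x i64> %min, i32 0
  ret i64 %res
}

The wider cases above (v16i32, v32i16, v64i8) simply repeat this step
log2(n) times, which is why the CHECK lines show chains of pmins*
followed by pshufd/extract shuffles.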

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 
 define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
@@ -51,7 +51,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X86-AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -63,7 +63,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -84,7 +84,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
@@ -96,7 +96,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -107,7 +107,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -118,7 +118,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX512-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovq %xmm0, %rax
@@ -132,7 +132,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 
 define i32 @test_reduce_v4i32(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -155,7 +155,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxud %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -164,7 +164,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -173,7 +173,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -196,7 +196,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxud %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -205,7 +205,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -224,7 +224,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 
 define i16 @test_reduce_v8i16(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -258,7 +258,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
@@ -268,7 +268,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
@@ -278,7 +278,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -312,7 +312,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
@@ -322,7 +322,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
@@ -345,7 +345,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 
 define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pmaxub %xmm0, %xmm1
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -361,7 +361,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxub %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -377,7 +377,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -391,7 +391,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pmaxub %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -407,7 +407,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxub %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -423,7 +423,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -457,7 +457,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 
 define i64 @test_reduce_v4i64(<4 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -495,7 +495,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
 ; X86-SSE42-NEXT:    movdqa %xmm1, %xmm4
@@ -514,7 +514,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X86-AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
@@ -538,7 +538,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
 ; X86-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
@@ -556,7 +556,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -592,7 +592,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X64-SSE42-NEXT:    movdqa %xmm1, %xmm4
@@ -610,7 +610,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
@@ -633,7 +633,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
 ; X64-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
@@ -650,7 +650,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -670,7 +670,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 
 define i32 @test_reduce_v8i32(<8 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -701,7 +701,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxud %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxud %xmm0, %xmm1
@@ -711,7 +711,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -723,7 +723,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -735,7 +735,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -766,7 +766,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxud %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxud %xmm0, %xmm1
@@ -776,7 +776,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -788,7 +788,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -800,7 +800,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -825,7 +825,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 
 define i16 @test_reduce_v16i16(<16 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -867,7 +867,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X86-SSE42-NEXT:    pxor %xmm1, %xmm0
@@ -878,7 +878,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -891,7 +891,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -904,7 +904,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -946,7 +946,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE42-NEXT:    pxor %xmm1, %xmm0
@@ -957,7 +957,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -970,7 +970,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -983,7 +983,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
@@ -1012,7 +1012,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 
 define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pmaxub %xmm1, %xmm0
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pmaxub %xmm0, %xmm1
@@ -1029,7 +1029,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxub %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pmaxub %xmm0, %xmm1
@@ -1046,7 +1046,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1063,7 +1063,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1080,7 +1080,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pmaxub %xmm1, %xmm0
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pmaxub %xmm0, %xmm1
@@ -1097,7 +1097,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxub %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pmaxub %xmm0, %xmm1
@@ -1114,7 +1114,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1131,7 +1131,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1148,7 +1148,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1188,7 +1188,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 
 define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    subl $28, %esp
 ; X86-SSE2-NEXT:    .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT:    movdqa %xmm3, %xmm5
@@ -1263,7 +1263,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm4
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm6 = [0,2147483648,0,2147483648]
 ; X86-SSE42-NEXT:    movdqa %xmm3, %xmm0
@@ -1296,7 +1296,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
 ; X86-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -1330,7 +1330,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
 ; X86-AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
 ; X86-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm4
@@ -1352,7 +1352,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1418,7 +1418,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm4
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
 ; X64-SSE42-NEXT:    movdqa %xmm3, %xmm0
@@ -1450,7 +1450,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -1483,7 +1483,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
 ; X64-AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
 ; X64-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm4
@@ -1504,7 +1504,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1529,7 +1529,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 
 define i32 @test_reduce_v16i32(<16 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; X86-SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1576,7 +1576,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxud %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxud %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxud %xmm1, %xmm0
@@ -1588,7 +1588,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
@@ -1603,7 +1603,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
@@ -1616,7 +1616,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1663,7 +1663,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxud %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxud %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxud %xmm1, %xmm0
@@ -1675,7 +1675,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
@@ -1690,7 +1690,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm0
@@ -1703,7 +1703,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxud %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1733,7 +1733,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 
 define i16 @test_reduce_v32i16(<32 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; X86-SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1791,7 +1791,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxuw %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxuw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
@@ -1804,7 +1804,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1820,7 +1820,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
@@ -1834,7 +1834,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE2-NEXT:    movdqa %xmm3, %xmm5
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm5
@@ -1892,7 +1892,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxuw %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxuw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxuw %xmm1, %xmm0
@@ -1905,7 +1905,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
@@ -1921,7 +1921,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0
@@ -1935,7 +1935,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1969,7 +1969,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 
 define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pmaxub %xmm3, %xmm1
 ; X86-SSE2-NEXT:    pmaxub %xmm2, %xmm0
 ; X86-SSE2-NEXT:    pmaxub %xmm1, %xmm0
@@ -1988,7 +1988,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pmaxub %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pmaxub %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pmaxub %xmm1, %xmm0
@@ -2007,7 +2007,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
@@ -2027,7 +2027,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
@@ -2045,7 +2045,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pmaxub %xmm3, %xmm1
 ; X64-SSE2-NEXT:    pmaxub %xmm2, %xmm0
 ; X64-SSE2-NEXT:    pmaxub %xmm1, %xmm0
@@ -2064,7 +2064,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pmaxub %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pmaxub %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pmaxub %xmm1, %xmm0
@@ -2083,7 +2083,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
@@ -2103,7 +2103,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm0
@@ -2121,7 +2121,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpmaxub %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
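
For reference, the unsigned-max tests elided above use the same
shuffle/compare/select ladder, with an unsigned predicate in place of
the signed one. A minimal hand-written sketch (illustrative only, not
copied from the test file) of one step on <2 x i64>:

define i64 @umax_v2i64_sketch(<2 x i64> %a0) {
  ; bring the high lane down alongside the low lane
  %shuf = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
  ; unsigned greater-than; keep the larger element per lane
  %cmp = icmp ugt <2 x i64> %a0, %shuf
  %max = select <2 x i1> %cmp, <2 x i64> %a0, <2 x i64> %shuf
  %res = extractelement <2 x i64> %max, i32 0
  ret i64 %res
}

SSE/AVX before AVX-512 have no unsigned 64-bit vector compare, which is
why the CHECK lines above first xor both operands with sign-bit
constants such as [9223372036854775808,9223372036854775808] and then use
a signed pcmpgt: flipping the sign bit maps unsigned order onto signed
order. AVX-512 targets can instead use vpmaxuq/vpminuq directly.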

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 
 define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v2i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -38,7 +38,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v2i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm0 = [0,2147483648,0,2147483648]
@@ -52,7 +52,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v2i64:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X86-AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -64,7 +64,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v2i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -85,7 +85,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v2i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
@@ -98,7 +98,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v2i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -109,7 +109,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v2i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -120,7 +120,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v2i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX512-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vmovq %xmm0, %rax
@@ -134,7 +134,7 @@ define i64 @test_reduce_v2i64(<2 x i64>
 
 define i32 @test_reduce_v4i32(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -157,7 +157,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminud %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -166,7 +166,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v4i32:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -175,7 +175,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -198,7 +198,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminud %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -207,7 +207,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v4i32:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -226,7 +226,7 @@ define i32 @test_reduce_v4i32(<4 x i32>
 
 define i16 @test_reduce_v8i16(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -260,21 +260,21 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
 ; X86-SSE42-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v8i16:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vphminposuw %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
@@ -308,14 +308,14 @@ define i16 @test_reduce_v8i16(<8 x i16>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
 ; X64-SSE42-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v8i16:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vphminposuw %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovd %xmm0, %eax
 ; X64-AVX-NEXT:    ## kill: %ax<def> %ax<kill> %eax<kill>
@@ -335,7 +335,7 @@ define i16 @test_reduce_v8i16(<8 x i16>
 
 define i8 @test_reduce_v16i8(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pminub %xmm0, %xmm1
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -351,7 +351,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -367,7 +367,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_reduce_v16i8:
-; X86-AVX:       ## BB#0:
+; X86-AVX:       ## %bb.0:
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -381,7 +381,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pminub %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -397,7 +397,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
@@ -413,7 +413,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_reduce_v16i8:
-; X64-AVX:       ## BB#0:
+; X64-AVX:       ## %bb.0:
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -447,7 +447,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %
 
 define i64 @test_reduce_v4i64(<4 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v4i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -485,7 +485,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v4i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
 ; X86-SSE42-NEXT:    movdqa %xmm2, %xmm4
@@ -506,7 +506,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v4i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X86-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -530,7 +530,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v4i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
 ; X86-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
@@ -548,7 +548,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v4i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -584,7 +584,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v4i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X64-SSE42-NEXT:    movdqa %xmm2, %xmm4
@@ -604,7 +604,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v4i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
@@ -627,7 +627,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v4i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
 ; X64-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
@@ -644,7 +644,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v4i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminuq %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -664,7 +664,7 @@ define i64 @test_reduce_v4i64(<4 x i64>
 
 define i32 @test_reduce_v8i32(<8 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -695,7 +695,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminud %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminud %xmm0, %xmm1
@@ -705,7 +705,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -717,7 +717,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -729,7 +729,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -760,7 +760,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminud %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminud %xmm0, %xmm1
@@ -770,7 +770,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -782,7 +782,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -794,7 +794,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -819,7 +819,7 @@ define i32 @test_reduce_v8i32(<8 x i32>
 
 define i16 @test_reduce_v16i16(<16 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -861,7 +861,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminuw %xmm1, %xmm0
 ; X86-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X86-SSE42-NEXT:    movd %xmm0, %eax
@@ -869,7 +869,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
@@ -879,7 +879,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
@@ -889,7 +889,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; X64-SSE2-NEXT:    pxor %xmm2, %xmm3
@@ -931,7 +931,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminuw %xmm1, %xmm0
 ; X64-SSE42-NEXT:    phminposuw %xmm0, %xmm0
 ; X64-SSE42-NEXT:    movd %xmm0, %eax
@@ -939,7 +939,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vphminposuw %xmm0, %xmm0
@@ -949,7 +949,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vphminposuw %xmm0, %xmm0
@@ -959,7 +959,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
 ; X64-AVX512-NEXT:    vphminposuw %xmm0, %xmm0
@@ -985,7 +985,7 @@ define i16 @test_reduce_v16i16(<16 x i16
 
 define i8 @test_reduce_v32i8(<32 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pminub %xmm1, %xmm0
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE2-NEXT:    pminub %xmm0, %xmm1
@@ -1002,7 +1002,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminub %xmm1, %xmm0
 ; X86-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT:    pminub %xmm0, %xmm1
@@ -1019,7 +1019,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1036,7 +1036,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1053,7 +1053,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pminub %xmm1, %xmm0
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE2-NEXT:    pminub %xmm0, %xmm1
@@ -1070,7 +1070,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminub %xmm1, %xmm0
 ; X64-SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT:    pminub %xmm0, %xmm1
@@ -1087,7 +1087,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1104,7 +1104,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1121,7 +1121,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX512-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -1161,7 +1161,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %
 
 define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v8i64:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    subl $28, %esp
 ; X86-SSE2-NEXT:    .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT:    movdqa %xmm2, %xmm6
@@ -1234,7 +1234,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v8i64:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    movdqa %xmm0, %xmm5
 ; X86-SSE42-NEXT:    movdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
 ; X86-SSE42-NEXT:    pxor %xmm4, %xmm0
@@ -1267,7 +1267,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
 ; X86-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -1301,7 +1301,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i64:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
 ; X86-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
 ; X86-AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm4
@@ -1323,7 +1323,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v8i64:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm5
 ; X64-SSE2-NEXT:    pxor %xmm9, %xmm5
@@ -1389,7 +1389,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v8i64:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    movdqa %xmm0, %xmm5
 ; X64-SSE42-NEXT:    movdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
 ; X64-SSE42-NEXT:    pxor %xmm4, %xmm0
@@ -1421,7 +1421,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i64:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
@@ -1454,7 +1454,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v8i64:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
 ; X64-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
 ; X64-AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm4
@@ -1475,7 +1475,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v8i64:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1500,7 +1500,7 @@ define i64 @test_reduce_v8i64(<8 x i64>
 
 define i32 @test_reduce_v16i32(<16 x i32> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v16i32:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm6
 ; X86-SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -1547,7 +1547,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v16i32:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminud %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminud %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminud %xmm1, %xmm0
@@ -1559,7 +1559,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v16i32:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2
@@ -1574,7 +1574,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v16i32:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
@@ -1587,7 +1587,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v16i32:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm6
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -1634,7 +1634,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v16i32:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminud %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminud %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminud %xmm1, %xmm0
@@ -1646,7 +1646,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v16i32:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm2
@@ -1661,7 +1661,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v16i32:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm0
@@ -1674,7 +1674,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v16i32:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminud %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1704,7 +1704,7 @@ define i32 @test_reduce_v16i32(<16 x i32
 
 define i16 @test_reduce_v32i16(<32 x i16> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v32i16:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X86-SSE2-NEXT:    movdqa %xmm0, %xmm6
 ; X86-SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -1762,7 +1762,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v32i16:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminuw %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminuw %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminuw %xmm1, %xmm0
@@ -1772,7 +1772,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v32i16:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
@@ -1785,7 +1785,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v32i16:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
@@ -1796,7 +1796,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v32i16:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; X64-SSE2-NEXT:    movdqa %xmm0, %xmm6
 ; X64-SSE2-NEXT:    pxor %xmm4, %xmm6
@@ -1854,7 +1854,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v32i16:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminuw %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminuw %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminuw %xmm1, %xmm0
@@ -1864,7 +1864,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v32i16:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
@@ -1877,7 +1877,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v32i16:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
@@ -1888,7 +1888,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v32i16:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -1919,7 +1919,7 @@ define i16 @test_reduce_v32i16(<32 x i16
 
 define i8 @test_reduce_v64i8(<64 x i8> %a0) {
 ; X86-SSE2-LABEL: test_reduce_v64i8:
-; X86-SSE2:       ## BB#0:
+; X86-SSE2:       ## %bb.0:
 ; X86-SSE2-NEXT:    pminub %xmm3, %xmm1
 ; X86-SSE2-NEXT:    pminub %xmm2, %xmm0
 ; X86-SSE2-NEXT:    pminub %xmm1, %xmm0
@@ -1938,7 +1938,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-SSE42-LABEL: test_reduce_v64i8:
-; X86-SSE42:       ## BB#0:
+; X86-SSE42:       ## %bb.0:
 ; X86-SSE42-NEXT:    pminub %xmm3, %xmm1
 ; X86-SSE42-NEXT:    pminub %xmm2, %xmm0
 ; X86-SSE42-NEXT:    pminub %xmm1, %xmm0
@@ -1957,7 +1957,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-SSE42-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_reduce_v64i8:
-; X86-AVX1:       ## BB#0:
+; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X86-AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
@@ -1977,7 +1977,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: test_reduce_v64i8:
-; X86-AVX2:       ## BB#0:
+; X86-AVX2:       ## %bb.0:
 ; X86-AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X86-AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
@@ -1995,7 +1995,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_reduce_v64i8:
-; X64-SSE2:       ## BB#0:
+; X64-SSE2:       ## %bb.0:
 ; X64-SSE2-NEXT:    pminub %xmm3, %xmm1
 ; X64-SSE2-NEXT:    pminub %xmm2, %xmm0
 ; X64-SSE2-NEXT:    pminub %xmm1, %xmm0
@@ -2014,7 +2014,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-SSE42-LABEL: test_reduce_v64i8:
-; X64-SSE42:       ## BB#0:
+; X64-SSE42:       ## %bb.0:
 ; X64-SSE42-NEXT:    pminub %xmm3, %xmm1
 ; X64-SSE42-NEXT:    pminub %xmm2, %xmm0
 ; X64-SSE42-NEXT:    pminub %xmm1, %xmm0
@@ -2033,7 +2033,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-SSE42-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_reduce_v64i8:
-; X64-AVX1:       ## BB#0:
+; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; X64-AVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm2
@@ -2053,7 +2053,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: test_reduce_v64i8:
-; X64-AVX2:       ## BB#0:
+; X64-AVX2:       ## %bb.0:
 ; X64-AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm0
@@ -2071,7 +2071,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: test_reduce_v64i8:
-; X64-AVX512:       ## BB#0:
+; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; X64-AVX512-NEXT:    vpminub %zmm1, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/horizontal-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-shuffle.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-shuffle.ll Mon Dec  4 09:18:51 2017
@@ -8,12 +8,12 @@
 
 define <4 x float> @test_unpackl_fhadd_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
 ; X32-LABEL: test_unpackl_fhadd_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhaddps %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_fhadd_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhaddps %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
@@ -24,12 +24,12 @@ define <4 x float> @test_unpackl_fhadd_1
 
 define <2 x double> @test_unpackh_fhadd_128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
 ; X32-LABEL: test_unpackh_fhadd_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhaddpd %xmm3, %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_fhadd_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhaddpd %xmm3, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
@@ -40,12 +40,12 @@ define <2 x double> @test_unpackh_fhadd_
 
 define <2 x double> @test_unpackl_fhsub_128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
 ; X32-LABEL: test_unpackl_fhsub_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhsubpd %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_fhsub_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhsubpd %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
@@ -56,12 +56,12 @@ define <2 x double> @test_unpackl_fhsub_
 
 define <4 x float> @test_unpackh_fhsub_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
 ; X32-LABEL: test_unpackh_fhsub_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhsubps %xmm3, %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_fhsub_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhsubps %xmm3, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
@@ -72,12 +72,12 @@ define <4 x float> @test_unpackh_fhsub_1
 
 define <8 x i16> @test_unpackl_hadd_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; X32-LABEL: test_unpackl_hadd_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphaddw %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_hadd_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphaddw %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -88,12 +88,12 @@ define <8 x i16> @test_unpackl_hadd_128(
 
 define <4 x i32> @test_unpackh_hadd_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
 ; X32-LABEL: test_unpackh_hadd_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphaddd %xmm3, %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_hadd_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphaddd %xmm3, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
@@ -104,12 +104,12 @@ define <4 x i32> @test_unpackh_hadd_128(
 
 define <4 x i32> @test_unpackl_hsub_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
 ; X32-LABEL: test_unpackl_hsub_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphsubd %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_hsub_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphsubd %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
@@ -120,12 +120,12 @@ define <4 x i32> @test_unpackl_hsub_128(
 
 define <8 x i16> @test_unpackh_hsub_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; X32-LABEL: test_unpackh_hsub_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphsubw %xmm3, %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_hsub_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphsubw %xmm3, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -136,12 +136,12 @@ define <8 x i16> @test_unpackh_hsub_128(
 
 define <16 x i8> @test_unpackl_packss_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; X32-LABEL: test_unpackl_packss_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_packss_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -152,12 +152,12 @@ define <16 x i8> @test_unpackl_packss_12
 
 define <8 x i16> @test_unpackh_packss_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
 ; X32-LABEL: test_unpackh_packss_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpackssdw %xmm3, %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_packss_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpackssdw %xmm3, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
@@ -168,12 +168,12 @@ define <8 x i16> @test_unpackh_packss_12
 
 define <8 x i16> @test_unpackl_packus_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
 ; X32-LABEL: test_unpackl_packus_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_packus_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
@@ -184,12 +184,12 @@ define <8 x i16> @test_unpackl_packus_12
 
 define <16 x i8> @test_unpackh_packus_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
 ; X32-LABEL: test_unpackh_packus_128:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpackuswb %xmm3, %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_packus_128:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpackuswb %xmm3, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
@@ -204,12 +204,12 @@ define <16 x i8> @test_unpackh_packus_12
 
 define <8 x float> @test_unpackl_fhadd_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> %a3) {
 ; X32-LABEL: test_unpackl_fhadd_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_fhadd_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -220,12 +220,12 @@ define <8 x float> @test_unpackl_fhadd_2
 
 define <4 x double> @test_unpackh_fhadd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> %a3) {
 ; X32-LABEL: test_unpackh_fhadd_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhaddpd %ymm3, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_fhadd_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhaddpd %ymm3, %ymm1, %ymm0
 ; X64-NEXT:    retq
   %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -236,12 +236,12 @@ define <4 x double> @test_unpackh_fhadd_
 
 define <4 x double> @test_unpackl_fhsub_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> %a3) {
 ; X32-LABEL: test_unpackl_fhsub_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhsubpd %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_fhsub_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhsubpd %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -252,12 +252,12 @@ define <4 x double> @test_unpackl_fhsub_
 
 define <8 x float> @test_unpackh_fhsub_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> %a3) {
 ; X32-LABEL: test_unpackh_fhsub_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vhsubps %ymm3, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_fhsub_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vhsubps %ymm3, %ymm1, %ymm0
 ; X64-NEXT:    retq
   %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -268,12 +268,12 @@ define <8 x float> @test_unpackh_fhsub_2
 
 define <16 x i16> @test_unpackl_hadd_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
 ; X32-LABEL: test_unpackl_hadd_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphaddw %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_hadd_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphaddw %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -284,12 +284,12 @@ define <16 x i16> @test_unpackl_hadd_256
 
 define <8 x i32> @test_unpackh_hadd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
 ; X32-LABEL: test_unpackh_hadd_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphaddd %ymm3, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_hadd_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphaddd %ymm3, %ymm1, %ymm0
 ; X64-NEXT:    retq
   %1 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -300,12 +300,12 @@ define <8 x i32> @test_unpackh_hadd_256(
 
 define <8 x i32> @test_unpackl_hsub_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
 ; X32-LABEL: test_unpackl_hsub_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphsubd %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_hsub_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphsubd %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -316,12 +316,12 @@ define <8 x i32> @test_unpackl_hsub_256(
 
 define <16 x i16> @test_unpackh_hsub_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
 ; X32-LABEL: test_unpackh_hsub_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vphsubw %ymm3, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_hsub_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vphsubw %ymm3, %ymm1, %ymm0
 ; X64-NEXT:    retq
   %1 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -332,12 +332,12 @@ define <16 x i16> @test_unpackh_hsub_256
 
 define <32 x i8> @test_unpackl_packss_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
 ; X32-LABEL: test_unpackl_packss_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpacksswb %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_packss_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpacksswb %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
@@ -348,12 +348,12 @@ define <32 x i8> @test_unpackl_packss_25
 
 define <16 x i16> @test_unpackh_packss_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
 ; X32-LABEL: test_unpackh_packss_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpackssdw %ymm3, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_packss_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpackssdw %ymm3, %ymm1, %ymm0
 ; X64-NEXT:    retq
   %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
@@ -364,12 +364,12 @@ define <16 x i16> @test_unpackh_packss_2
 
 define <16 x i16> @test_unpackl_packus_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
 ; X32-LABEL: test_unpackl_packus_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpackusdw %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackl_packus_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpackusdw %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
@@ -380,12 +380,12 @@ define <16 x i16> @test_unpackl_packus_2
 
 define <32 x i8> @test_unpackh_packus_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
 ; X32-LABEL: test_unpackh_packus_256:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    vpacksswb %ymm3, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_unpackh_packus_256:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vpacksswb %ymm3, %ymm1, %ymm0
 ; X64-NEXT:    retq
   %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)

Modified: llvm/trunk/test/CodeGen/X86/i256-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i256-add.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i256-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i256-add.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @add(i256* %p, i256* %q) nounwind {
 ; X32-LABEL: add:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    pushl %edi
@@ -50,7 +50,7 @@ define void @add(i256* %p, i256* %q) nou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: add:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq 16(%rdi), %rax
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq 8(%rdi), %rdx
@@ -71,7 +71,7 @@ define void @add(i256* %p, i256* %q) nou
 }
 define void @sub(i256* %p, i256* %q) nounwind {
 ; X32-LABEL: sub:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    pushl %edi
@@ -114,7 +114,7 @@ define void @sub(i256* %p, i256* %q) nou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: sub:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq 16(%rdi), %rax
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq 8(%rdi), %rdx

Modified: llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll Mon Dec  4 09:18:51 2017
@@ -7,13 +7,13 @@
 
 define void @foo(i64* %x, i64* %y) {
 ; X64-LABEL: foo:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rsi), %rax
 ; X64-NEXT:    movq %rax, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: foo:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -29,13 +29,13 @@ define void @foo(i64* %x, i64* %y) {
 
 define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, i64* %i) {
 ; X64-LABEL: store_i64_from_vector:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    paddw %xmm1, %xmm0
 ; X64-NEXT:    movq %xmm0, (%rdi)
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: store_i64_from_vector:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    paddw %xmm1, %xmm0
 ; X32-NEXT:    movq %xmm0, (%eax)
@@ -49,7 +49,7 @@ define void @store_i64_from_vector(<8 x
 
 define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, i64* %i) {
 ; X32AVX-LABEL: store_i64_from_vector256:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32AVX-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; X32AVX-NEXT:    vextracti128 $1, %ymm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/i64-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i64-to-float.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i64-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i64-to-float.ll Mon Dec  4 09:18:51 2017
@@ -8,27 +8,27 @@
 
 define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_sitofp_2i64_2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_sitofp_2i64_2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_sitofp_2i64_2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_sitofp_2i64_2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
@@ -39,27 +39,27 @@ define <2 x double> @mask_sitofp_2i64_2f
 
 define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_uitofp_2i64_2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_uitofp_2i64_2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_uitofp_2i64_2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_uitofp_2i64_2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
@@ -70,14 +70,14 @@ define <2 x double> @mask_uitofp_2i64_2f
 
 define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_sitofp_4i64_4f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_sitofp_4i64_4f32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
@@ -86,14 +86,14 @@ define <4 x float> @mask_sitofp_4i64_4f3
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_sitofp_4i64_4f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_sitofp_4i64_4f32:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -107,14 +107,14 @@ define <4 x float> @mask_sitofp_4i64_4f3
 
 define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
 ; X32-SSE-LABEL: mask_uitofp_4i64_4f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
 ; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: mask_uitofp_4i64_4f32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
@@ -123,14 +123,14 @@ define <4 x float> @mask_uitofp_4i64_4f3
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: mask_uitofp_4i64_4f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: mask_uitofp_4i64_4f32:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
@@ -144,7 +144,7 @@ define <4 x float> @mask_uitofp_4i64_4f3
 
 define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
 ; X32-SSE-LABEL: clamp_sitofp_2i64_2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pushl %ebp
 ; X32-SSE-NEXT:    movl %esp, %ebp
 ; X32-SSE-NEXT:    andl $-8, %esp
@@ -194,7 +194,7 @@ define <2 x double> @clamp_sitofp_2i64_2
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: clamp_sitofp_2i64_2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    pushl %ebp
 ; X32-AVX-NEXT:    movl %esp, %ebp
 ; X32-AVX-NEXT:    andl $-8, %esp
@@ -220,7 +220,7 @@ define <2 x double> @clamp_sitofp_2i64_2
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: clamp_sitofp_2i64_2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
 ; X64-SSE-NEXT:    movdqa %xmm0, %xmm2
 ; X64-SSE-NEXT:    pxor %xmm1, %xmm2
@@ -262,7 +262,7 @@ define <2 x double> @clamp_sitofp_2i64_2
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: clamp_sitofp_2i64_2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [18446744073709551361,18446744073709551361]
 ; X64-AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X64-AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/iabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/iabs.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/iabs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/iabs.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@
 ; rdar://10695237
 define i8 @test_i8(i8 %a) nounwind {
 ; X86-LABEL: test_i8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    sarb $7, %cl
@@ -20,7 +20,7 @@ define i8 @test_i8(i8 %a) nounwind {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    sarb $7, %al
 ; X64-NEXT:    addb %al, %dil
@@ -35,7 +35,7 @@ define i8 @test_i8(i8 %a) nounwind {
 
 define i16 @test_i16(i16 %a) nounwind {
 ; X86-NO-CMOV-LABEL: test_i16:
-; X86-NO-CMOV:       # BB#0:
+; X86-NO-CMOV:       # %bb.0:
 ; X86-NO-CMOV-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X86-NO-CMOV-NEXT:    movl %eax, %ecx
 ; X86-NO-CMOV-NEXT:    sarw $15, %cx
@@ -45,7 +45,7 @@ define i16 @test_i16(i16 %a) nounwind {
 ; X86-NO-CMOV-NEXT:    retl
 ;
 ; X86-CMOV-LABEL: test_i16:
-; X86-CMOV:       # BB#0:
+; X86-CMOV:       # %bb.0:
 ; X86-CMOV-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X86-CMOV-NEXT:    movl %ecx, %eax
 ; X86-CMOV-NEXT:    negw %ax
@@ -53,7 +53,7 @@ define i16 @test_i16(i16 %a) nounwind {
 ; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: test_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    negw %ax
 ; X64-NEXT:    cmovlw %di, %ax
@@ -66,7 +66,7 @@ define i16 @test_i16(i16 %a) nounwind {
 
 define i32 @test_i32(i32 %a) nounwind {
 ; X86-NO-CMOV-LABEL: test_i32:
-; X86-NO-CMOV:       # BB#0:
+; X86-NO-CMOV:       # %bb.0:
 ; X86-NO-CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NO-CMOV-NEXT:    movl %eax, %ecx
 ; X86-NO-CMOV-NEXT:    sarl $31, %ecx
@@ -75,7 +75,7 @@ define i32 @test_i32(i32 %a) nounwind {
 ; X86-NO-CMOV-NEXT:    retl
 ;
 ; X86-CMOV-LABEL: test_i32:
-; X86-CMOV:       # BB#0:
+; X86-CMOV:       # %bb.0:
 ; X86-CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-CMOV-NEXT:    movl %ecx, %eax
 ; X86-CMOV-NEXT:    negl %eax
@@ -83,7 +83,7 @@ define i32 @test_i32(i32 %a) nounwind {
 ; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: test_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    negl %eax
 ; X64-NEXT:    cmovll %edi, %eax
@@ -96,7 +96,7 @@ define i32 @test_i32(i32 %a) nounwind {
 
 define i64 @test_i64(i64 %a) nounwind {
 ; X86-LABEL: test_i64:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl %edx, %ecx
 ; X86-NEXT:    sarl $31, %ecx
@@ -108,7 +108,7 @@ define i64 @test_i64(i64 %a) nounwind {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    negq %rax
 ; X64-NEXT:    cmovlq %rdi, %rax

Modified: llvm/trunk/test/CodeGen/X86/illegal-bitfield-loadstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/illegal-bitfield-loadstore.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/illegal-bitfield-loadstore.ll (original)
+++ llvm/trunk/test/CodeGen/X86/illegal-bitfield-loadstore.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @i24_or(i24* %a) {
 ; X86-LABEL: i24_or:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %edx
 ; X86-NEXT:    movzbl 2(%ecx), %eax
@@ -16,7 +16,7 @@ define void @i24_or(i24* %a) {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: i24_or:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rdi), %ecx
 ; X64-NEXT:    movb %cl, 2(%rdi)
@@ -33,7 +33,7 @@ define void @i24_or(i24* %a) {
 
 define void @i24_and_or(i24* %a) {
 ; X86-LABEL: i24_and_or:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %edx
 ; X86-NEXT:    movzbl 2(%ecx), %eax
@@ -46,7 +46,7 @@ define void @i24_and_or(i24* %a) {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: i24_and_or:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rdi), %ecx
 ; X64-NEXT:    movb %cl, 2(%rdi)
@@ -65,7 +65,7 @@ define void @i24_and_or(i24* %a) {
 
 define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
 ; X86-LABEL: i24_insert_bit:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -84,7 +84,7 @@ define void @i24_insert_bit(i24* %a, i1
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: i24_insert_bit:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rdi), %ecx
 ; X64-NEXT:    movb %cl, 2(%rdi)
@@ -106,13 +106,13 @@ define void @i24_insert_bit(i24* %a, i1
 
 define void @i56_or(i56* %a) {
 ; X86-LABEL: i56_or:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    orl $384, (%eax) # imm = 0x180
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: i56_or:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl 4(%rdi), %eax
 ; X64-NEXT:    movzbl 6(%rdi), %ecx
 ; X64-NEXT:    movb %cl, 6(%rdi)
@@ -135,7 +135,7 @@ define void @i56_or(i56* %a) {
 
 define void @i56_and_or(i56* %a) {
 ; X86-LABEL: i56_and_or:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl $384, %ecx # imm = 0x180
 ; X86-NEXT:    orl (%eax), %ecx
@@ -144,7 +144,7 @@ define void @i56_and_or(i56* %a) {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: i56_and_or:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl 4(%rdi), %eax
 ; X64-NEXT:    movzbl 6(%rdi), %ecx
 ; X64-NEXT:    movb %cl, 6(%rdi)
@@ -170,7 +170,7 @@ define void @i56_and_or(i56* %a) {
 
 define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
 ; X86-LABEL: i56_insert_bit:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    shll $13, %ecx
@@ -181,7 +181,7 @@ define void @i56_insert_bit(i56* %a, i1
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: i56_insert_bit:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    movzwl 4(%rdi), %ecx
 ; X64-NEXT:    movzbl 6(%rdi), %edx

Modified: llvm/trunk/test/CodeGen/X86/immediate_merging.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/immediate_merging.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/immediate_merging.ll (original)
+++ llvm/trunk/test/CodeGen/X86/immediate_merging.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@
 ; instructions.
 define i32 @foo() optsize {
 ; X86-LABEL: foo:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl $1234, %eax # imm = 0x4D2
 ; X86-NEXT:    movl %eax, a
 ; X86-NEXT:    movl %eax, b
@@ -23,7 +23,7 @@ define i32 @foo() optsize {
 ; X86-NEXT:    movl %eax, c
 ; X86-NEXT:    cmpl %eax, e
 ; X86-NEXT:    jne .LBB0_2
-; X86-NEXT:  # BB#1: # %if.then
+; X86-NEXT:  # %bb.1: # %if.then
 ; X86-NEXT:    movl $1, x
 ; X86-NEXT:  .LBB0_2: # %if.end
 ; X86-NEXT:    movl $1234, f # imm = 0x4D2
@@ -34,7 +34,7 @@ define i32 @foo() optsize {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: foo:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl $1234, %eax # imm = 0x4D2
 ; X64-NEXT:    movl %eax, {{.*}}(%rip)
 ; X64-NEXT:    movl %eax, {{.*}}(%rip)
@@ -42,7 +42,7 @@ define i32 @foo() optsize {
 ; X64-NEXT:    movl %eax, {{.*}}(%rip)
 ; X64-NEXT:    cmpl %eax, {{.*}}(%rip)
 ; X64-NEXT:    jne .LBB0_2
-; X64-NEXT:  # BB#1: # %if.then
+; X64-NEXT:  # %bb.1: # %if.then
 ; X64-NEXT:    movl $1, {{.*}}(%rip)
 ; X64-NEXT:  .LBB0_2: # %if.end
 ; X64-NEXT:    movl $1234, {{.*}}(%rip) # imm = 0x4D2
@@ -76,14 +76,14 @@ if.end:
 ; Test -O2 to make sure that all immediates get pulled in to their users.
 define i32 @foo2() {
 ; X86-LABEL: foo2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl $1234, a # imm = 0x4D2
 ; X86-NEXT:    movl $1234, b # imm = 0x4D2
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: foo2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl $1234, {{.*}}(%rip) # imm = 0x4D2
 ; X64-NEXT:    movl $1234, {{.*}}(%rip) # imm = 0x4D2
 ; X64-NEXT:    xorl %eax, %eax
@@ -103,7 +103,7 @@ declare void @llvm.memset.p0i8.i32(i8* n
 ; sure we don't directly store the immediates.
 define void @foomemset() optsize {
 ; X86-LABEL: foomemset:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl $555819297, %eax # imm = 0x21212121
 ; X86-NEXT:    movl %eax, AA+20
 ; X86-NEXT:    movl %eax, AA+16
@@ -114,7 +114,7 @@ define void @foomemset() optsize {
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: foomemset:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movabsq $2387225703656530209, %rax # imm = 0x2121212121212121
 ; X64-NEXT:    movq %rax, AA+{{.*}}(%rip)
 ; X64-NEXT:    movq %rax, AA+{{.*}}(%rip)

Modified: llvm/trunk/test/CodeGen/X86/immediate_merging64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/immediate_merging64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/immediate_merging64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/immediate_merging64.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 ; optimizing for code size.
 define i1 @imm_multiple_users(i64 %a, i64* %b) optsize {
 ; CHECK-LABEL: imm_multiple_users:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    movq %rax, (%rsi)
 ; CHECK-NEXT:    cmpq %rax, %rdi
@@ -26,7 +26,7 @@ declare void @llvm.memset.p0i8.i64(i8* n
 ; code size.
 define void @memset_zero(i8* noalias nocapture %D) optsize {
 ; CHECK-LABEL: memset_zero:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movq %rax, 7(%rdi)
 ; CHECK-NEXT:    movq %rax, (%rdi)

Modified: llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir (original)
+++ llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir Mon Dec  4 09:18:51 2017
@@ -391,15 +391,15 @@ liveins:
   - { reg: '%esi' }
 # CHECK:  bb.0.entry:
 # CHECK:    %eax = MOV32ri 2200000
-# CHECK-NEXT:    %eax = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
-# CHECK-NEXT:    JMP_1 %bb.1.not_null
+# CHECK-NEXT:    %eax = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT:    JMP_1 %bb.1
 
 body:             |
   bb.0.entry:
     liveins: %esi, %rdi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.3.is_null, implicit %eflags
+    JE_1 %bb.3, implicit %eflags
 
   bb.1.not_null:
     liveins: %esi, %rdi
@@ -407,7 +407,7 @@ body:             |
     %eax = MOV32ri 2200000
     %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
     CMP32rr killed %eax, killed %esi, implicit-def %eflags
-    JE_1 %bb.4.ret_100, implicit %eflags
+    JE_1 %bb.4, implicit %eflags
 
   bb.2.ret_200:
     %eax = MOV32ri 200
@@ -433,7 +433,7 @@ liveins:
 # CHECK: bb.0.entry:
 # CHECK:    %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
 # CHECK-NEXT:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.3.is_null, implicit %eflags
+# CHECK-NEXT:    JE_1 %bb.3, implicit %eflags
 
 body:             |
   bb.0.entry:
@@ -441,7 +441,7 @@ body:             |
 
     %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.3.is_null, implicit %eflags
+    JE_1 %bb.3, implicit %eflags
 
   bb.1.not_null:
     liveins: %esi, %rdi
@@ -449,7 +449,7 @@ body:             |
     %eax = MOV32ri 2200000
     %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
     CMP32rr killed %eax, killed %esi, implicit-def %eflags
-    JE_1 %bb.4.ret_100, implicit %eflags
+    JE_1 %bb.4, implicit %eflags
 
   bb.2.ret_200:
 
@@ -475,14 +475,14 @@ liveins:
   - { reg: '%esi' }
 # CHECK:  bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.3.is_null, implicit %eflags
+# CHECK-NEXT:    JE_1 %bb.3, implicit %eflags
 
 body:             |
   bb.0.entry:
     liveins: %esi, %rdi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.3.is_null, implicit %eflags
+    JE_1 %bb.3, implicit %eflags
 
   bb.1.not_null:
     liveins: %esi, %rdi
@@ -491,7 +491,7 @@ body:             |
     %eax = ADD32ri killed %eax, 100, implicit-def dead %eflags
     %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
     CMP32rr killed %eax, killed %esi, implicit-def %eflags
-    JE_1 %bb.4.ret_100, implicit %eflags
+    JE_1 %bb.4, implicit %eflags
 
   bb.2.ret_200:
     %eax = MOV32ri 200
@@ -516,14 +516,14 @@ liveins:
   - { reg: '%rsi' }
 # CHECK:  bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.3.is_null, implicit %eflags
+# CHECK-NEXT:    JE_1 %bb.3, implicit %eflags
 
 body:             |
   bb.0.entry:
     liveins: %rsi, %rdi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.3.is_null, implicit %eflags
+    JE_1 %bb.3, implicit %eflags
 
   bb.1.not_null:
     liveins: %rsi, %rdi
@@ -531,7 +531,7 @@ body:             |
     %rdi  = MOV64ri 5000
     %rdi = AND64rm killed %rdi, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
     CMP64rr killed %rdi, killed %rsi, implicit-def %eflags
-    JE_1 %bb.4.ret_100, implicit %eflags
+    JE_1 %bb.4, implicit %eflags
 
   bb.2.ret_200:
     %eax = MOV32ri 200
@@ -556,14 +556,14 @@ liveins:
   - { reg: '%rsi' }
 # CHECK:  bb.0.entry:
 # CHECK:  %rbx = MOV64rr %rdx
-# CHECK-NEXT:  %rbx = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT:  %rbx = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
 
 body:             |
   bb.0.entry:
     liveins: %rsi, %rdi, %rdx
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.3.is_null, implicit %eflags
+    JE_1 %bb.3, implicit %eflags
 
   bb.1.not_null:
     liveins: %rsi, %rdi, %rdx
@@ -572,7 +572,7 @@ body:             |
     %rbx = AND64rm killed %rbx, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
     %rdx = MOV64ri 0
     CMP64rr killed %rbx, killed %rsi, implicit-def %eflags
-    JE_1 %bb.4.ret_100, implicit %eflags
+    JE_1 %bb.4, implicit %eflags
 
   bb.2.ret_200:
     %eax = MOV32ri 200
@@ -611,7 +611,7 @@ body:             |
     CFI_INSTRUCTION offset %rbx, -16
     %rbx = MOV64rr %rdi
     TEST64rr %rbx, %rbx, implicit-def %eflags
-    JE_1 %bb.2.leave, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.stay:
     liveins: %rbx
@@ -648,7 +648,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -668,8 +668,8 @@ body:             |
 name:            use_alternate_load_op
 # CHECK-LABEL: name:            use_alternate_load_op
 # CHECK: bb.0.entry:
-# CHECK: %rax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: %rax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -682,7 +682,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -701,8 +701,8 @@ body:             |
 name:            imp_null_check_gep_load_with_use_dep
 # CHECK-LABEL: name:            imp_null_check_gep_load_with_use_dep
 # CHECK:  bb.0.entry:
-# CHECK:    %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
-# CHECK-NEXT:    JMP_1 %bb.1.not_null
+# CHECK:    %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
+# CHECK-NEXT:    JMP_1 %bb.1
 alignment:       4
 tracksRegLiveness: true
 liveins:         
@@ -713,7 +713,7 @@ body:             |
     liveins: %rsi, %rdi
   
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.1.is_null, implicit %eflags
+    JE_1 %bb.1, implicit %eflags
   
   bb.2.not_null:
     liveins: %rdi, %rsi
@@ -733,8 +733,8 @@ name:            imp_null_check_load_wit
 # CHECK-LABEL: name:            imp_null_check_load_with_base_sep
 # CHECK:  bb.0.entry:
 # CHECK:     %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
-# CHECK-NEXT:    %esi = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags
-# CHECK-NEXT:    JMP_1 %bb.1.not_null
+# CHECK-NEXT:    %esi = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags
+# CHECK-NEXT:    JMP_1 %bb.1
 alignment:       4
 tracksRegLiveness: true
 liveins:         
@@ -745,7 +745,7 @@ body:             |
     liveins: %rsi, %rdi
   
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.1.is_null, implicit %eflags
+    JE_1 %bb.1, implicit %eflags
   
   bb.2.not_null:
     liveins: %rdi, %rsi
@@ -764,8 +764,8 @@ body:             |
 name:            inc_store
 # CHECK-LABEL: name:            inc_store
 # CHECK: bb.0.entry:
-# CHECK:  %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK:  %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -778,7 +778,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -794,8 +794,8 @@ body:             |
 name:            inc_store_plus_offset
 # CHECK-LABEL: inc_store_plus_offset
 # CHECK: bb.0.entry:
-# CHECK:  %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK:  %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -808,7 +808,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -825,8 +825,8 @@ name:            inc_store_with_dep
 # CHECK-LABEL: inc_store_with_dep
 # CHECK: bb.0.entry:
 # CHECK:  %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
-# CHECK-NEXT:  %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK-NEXT:  %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -839,7 +839,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -857,7 +857,7 @@ name:            inc_store_with_dep_in_n
 # CHECK-LABEL: inc_store_with_dep_in_null
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -870,7 +870,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -892,7 +892,7 @@ name:            inc_store_with_volatile
 # CHECK-LABEL: inc_store_with_volatile
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -905,7 +905,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -922,7 +922,7 @@ name:            inc_store_with_two_dep
 # CHECK-LABEL: inc_store_with_two_dep
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -935,7 +935,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -954,7 +954,7 @@ name:            inc_store_with_redefine
 # CHECK-LABEL: inc_store_with_redefined_base
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -967,7 +967,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -984,8 +984,8 @@ body:             |
 name:            inc_store_with_reused_base
 # CHECK-LABEL: inc_store_with_reused_base
 # CHECK: bb.0.entry:
-# CHECK:  %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK:  %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -998,7 +998,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1017,7 +1017,7 @@ name:            inc_store_across_call
 # CHECK-LABEL: inc_store_across_call
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rbx, %rbx, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1037,7 +1037,7 @@ body:             |
     CFI_INSTRUCTION offset %rbx, -16
     %rbx = MOV64rr killed %rdi
     TEST64rr %rbx, %rbx, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rbx
@@ -1059,7 +1059,7 @@ name:            inc_store_with_dep_in_d
 # CHECK-LABEL: inc_store_with_dep_in_dep
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1072,7 +1072,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1092,7 +1092,7 @@ name:            inc_store_with_load_ove
 # CHECK-LABEL: inc_store_with_load_over_store
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1105,7 +1105,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1124,7 +1124,7 @@ name:            inc_store_with_store_ov
 # CHECK-LABEL: inc_store_with_store_over_load
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1137,7 +1137,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1156,7 +1156,7 @@ name:            inc_store_with_store_ov
 # CHECK-LABEL: inc_store_with_store_over_store
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1169,7 +1169,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1186,8 +1186,8 @@ body:             |
 name:            inc_store_with_load_and_store
 # CHECK-LABEL: inc_store_with_load_and_store
 # CHECK: bb.0.entry:
-# CHECK:  %noreg = FAULTING_OP 2, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK:  %noreg = FAULTING_OP 2, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1200,7 +1200,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1217,8 +1217,8 @@ body:             |
 name:            inc_store_and_load_no_alias
 # CHECK-LABEL: inc_store_and_load_no_alias
 # CHECK: bb.0.entry:
-# CHECK:  %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
-# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK:  %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+# CHECK-NEXT: JMP_1 %bb.1
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1231,7 +1231,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1250,7 +1250,7 @@ name:            inc_store_and_load_alia
 # CHECK-LABEL: inc_store_and_load_alias
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1263,7 +1263,7 @@ body:             |
     liveins: %rdi, %rsi
 
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi
@@ -1282,7 +1282,7 @@ name:            inc_spill_dep
 # CHECK-LABEL: inc_spill_dep
 # CHECK: bb.0.entry:
 # CHECK:    TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT:    JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK-NEXT:    JE_1 %bb.2, implicit killed %eflags
 # CHECK: bb.1.not_null
 
 alignment:       4
@@ -1299,7 +1299,7 @@ body:             |
     %rsp = frame-setup SUB64ri8 %rsp, 8, implicit-def dead %eflags
     MOV32mr %rsp, 1, %noreg, 0, %noreg, %esi :: (store 4 into %stack.0)
     TEST64rr %rdi, %rdi, implicit-def %eflags
-    JE_1 %bb.2.is_null, implicit killed %eflags
+    JE_1 %bb.2, implicit killed %eflags
 
   bb.1.not_null:
     liveins: %rdi, %rsi

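The pattern in the hunks above is uniform: a machine-basic-block *reference* now
prints as the bare block number, while the block *definition* keeps its IR-name
suffix, so only branch and FAULTING_OP operands (and the FileCheck patterns that
match them) are updated. A minimal sketch, reusing the block names from the test:

    # reference (instruction operand) - new printed form:
    JE_1 %bb.3, implicit %eflags       # previously: JE_1 %bb.3.is_null, ...
    # definition - unchanged, still carries the IR block name:
  bb.3.is_null:
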
Modified: llvm/trunk/test/CodeGen/X86/imul-lea-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/imul-lea-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/imul-lea-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/imul-lea-2.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i64 @t1(i64 %a) nounwind readnone {
 ; CHECK-LABEL: t1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    leaq (%rdi,%rdi,8), %rax
 ; CHECK-NEXT:    leaq (%rax,%rax,8), %rax
 ; CHECK-NEXT:    retq
@@ -15,7 +15,7 @@ entry:
 
 define i64 @t2(i64 %a) nounwind readnone {
 ; CHECK-LABEL: t2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    shlq $3, %rdi
 ; CHECK-NEXT:    leaq (%rdi,%rdi,4), %rax
 ; CHECK-NEXT:    retq

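Both imul-lea-2.ll functions exercise strength reduction of a constant multiply
into LEA address arithmetic; the multiplier constants are elided in the truncated
hunks, but they can be read back off the CHECK lines:

    leaq (%rdi,%rdi,8), %rax   # rax = rdi + 8*rdi = 9*a
    leaq (%rax,%rax,8), %rax   # rax = 9*a + 8*(9*a) = 81*a   (t1 computes a*81)

    shlq $3, %rdi              # rdi = 8*a
    leaq (%rdi,%rdi,4), %rax   # rax = 8*a + 4*(8*a) = 40*a   (t2 computes a*40)
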
Modified: llvm/trunk/test/CodeGen/X86/imul-lea.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/imul-lea.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/imul-lea.ll (original)
+++ llvm/trunk/test/CodeGen/X86/imul-lea.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@ declare i32 @foo()
 
 define i32 @test() {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    calll foo
 ; CHECK-NEXT:    leal (%eax,%eax,8), %eax
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/imul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/imul.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/imul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/imul.ll Mon Dec  4 09:18:51 2017
@@ -174,14 +174,14 @@ define i64 @mul18446744073709551615_64(i
 
 define i32 @test(i32 %a) {
 ; X64-LABEL: test:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    shll $5, %eax
 ; X64-NEXT:    subl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shll $5, %eax
@@ -194,7 +194,7 @@ entry:
 
 define i32 @test1(i32 %a) {
 ; X64-LABEL: test1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    shll $5, %eax
 ; X64-NEXT:    subl %edi, %eax
@@ -202,7 +202,7 @@ define i32 @test1(i32 %a) {
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test1:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shll $5, %eax
@@ -217,7 +217,7 @@ entry:
 
 define i32 @test2(i32 %a) {
 ; X64-LABEL: test2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    shll $5, %eax
@@ -225,7 +225,7 @@ define i32 @test2(i32 %a) {
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shll $5, %eax
@@ -238,7 +238,7 @@ entry:
 
 define i32 @test3(i32 %a) {
 ; X64-LABEL: test3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    shll $5, %eax
@@ -247,7 +247,7 @@ define i32 @test3(i32 %a) {
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test3:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shll $5, %eax
@@ -261,14 +261,14 @@ entry:
 
 define i64 @test4(i64 %a) {
 ; X64-LABEL: test4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    shlq $5, %rax
 ; X64-NEXT:    subq %rdi, %rax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test4:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    shll $5, %ecx
@@ -284,7 +284,7 @@ entry:
 
 define i64 @test5(i64 %a) {
 ; X64-LABEL: test5:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    shlq $5, %rax
 ; X64-NEXT:    subq %rdi, %rax
@@ -292,7 +292,7 @@ define i64 @test5(i64 %a) {
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test5:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -316,14 +316,14 @@ entry:
 
 define i64 @test6(i64 %a) {
 ; X64-LABEL: test6:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    shlq $5, %rax
 ; X64-NEXT:    leaq (%rax,%rdi), %rax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test6:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    shll $5, %ecx
@@ -339,7 +339,7 @@ entry:
 
 define i64 @test7(i64 %a) {
 ; X64-LABEL: test7:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    shlq $5, %rax
 ; X64-NEXT:    leaq (%rax,%rdi), %rax
@@ -347,7 +347,7 @@ define i64 @test7(i64 %a) {
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test7:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -370,13 +370,13 @@ entry:
 
 define i64 @testOverflow(i64 %a) {
 ; X64-LABEL: testOverflow:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
 ; X64-NEXT:    imulq %rdi, %rax
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: testOverflow:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8

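The imul.ll tests follow the same theme with shift-and-add/sub decompositions.
Reading the arithmetic back off the CHECK lines above:

    shll $5, %eax            # eax = 32*a
    subl %edi, %eax          # eax = 32*a - a = 31*a   (the @test cases: a*31)

    shlq $5, %rax            # rax = 32*a
    leaq (%rax,%rdi), %rax   # rax = 32*a + a = 33*a   (test6/test7: a*33)

testOverflow keeps a real imulq because 0x7FFFFFFFFFFFFFFF has no comparably
cheap shift/add form.
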
Modified: llvm/trunk/test/CodeGen/X86/inline-0bh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-0bh.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-0bh.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-0bh.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; Function Attrs: noinline nounwind
 define i32 @PR31007() {
 ; CHECK-LABEL: PR31007:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:  #APP
 ; CHECK:       addb $11, %al
 ; CHECK:       #NO_APP

Modified: llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; There should be no stack manipulations between the inline asm and ret.
 define x86_fp80 @test1() {
 ; CHECK-LABEL: test1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    fld0
 ; CHECK-NEXT:    ## InlineAsm End
@@ -15,7 +15,7 @@ define x86_fp80 @test1() {
 
 define double @test2() {
 ; CHECK-LABEL: test2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    fld0
 ; CHECK-NEXT:    ## InlineAsm End
@@ -28,7 +28,7 @@ define double @test2() {
 ; Asm consumes stack, nothing should be popped.
 define void @test3(x86_fp80 %X) {
 ; CHECK-LABEL: test3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    frob
@@ -40,7 +40,7 @@ define void @test3(x86_fp80 %X) {
 
 define void @test4(double %X) {
 ; CHECK-LABEL: test4:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    frob
@@ -54,7 +54,7 @@ define void @test4(double %X) {
 ; The fadd can be done in xmm or x87 regs - we don't test that.
 define void @test5(double %X) {
 ; CHECK-LABEL: test5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fadds LCPI4_0
 ; CHECK-NEXT:    ## InlineAsm Start
@@ -68,7 +68,7 @@ define void @test5(double %X) {
 
 define void @test6(double %A, double %B, double %C, double %D, double %E) nounwind {
 ; CHECK-LABEL: test6:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
@@ -113,7 +113,7 @@ entry:
 ; inline asm.
 define void @testPR4185() {
 ; CHECK-LABEL: testPR4185:
-; CHECK:       ## BB#0: ## %return
+; CHECK:       ## %bb.0: ## %return
 ; CHECK-NEXT:    flds LCPI6_0
 ; CHECK-NEXT:    fld %st(0)
 ; CHECK-NEXT:    ## InlineAsm Start
@@ -135,7 +135,7 @@ return:
 ; A valid alternative would be to remat the constant pool load before each inline asm.
 define void @testPR4185b() {
 ; CHECK-LABEL: testPR4185b:
-; CHECK:       ## BB#0: ## %return
+; CHECK:       ## %bb.0: ## %return
 ; CHECK-NEXT:    flds LCPI7_0
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    fistl %st(0)
@@ -154,7 +154,7 @@ return:
 ; The return value from ceil must be duped before being consumed by asm.
 define void @testPR4459(x86_fp80 %a) {
 ; CHECK-LABEL: testPR4459:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    subl $28, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
@@ -182,7 +182,7 @@ declare x86_fp80 @ceil(x86_fp80)
 ; Set up call to test.
 define void @testPR4484(x86_fp80 %a) {
 ; CHECK-LABEL: testPR4484:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    subl $28, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
@@ -206,7 +206,7 @@ entry:
 ; PR4485
 define void @testPR4485(x86_fp80* %a) {
 ; CHECK-LABEL: testPR4485:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    fldt (%eax)
 ; CHECK-NEXT:    flds LCPI10_0
@@ -247,7 +247,7 @@ entry:
 ;   }
 define void @fist1(x86_fp80 %x, i32* %p) nounwind ssp {
 ; CHECK-LABEL: fist1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    ## InlineAsm Start
@@ -271,7 +271,7 @@ entry:
 ;   }
 define x86_fp80 @fist2(x86_fp80 %x, i32* %p) nounwind ssp {
 ; CHECK-LABEL: fist2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    ## InlineAsm Start
@@ -291,7 +291,7 @@ entry:
 ;   }
 define void @fucomp1(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
 ; CHECK-LABEL: fucomp1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fxch %st(1)
@@ -318,7 +318,7 @@ entry:
 ;
 define void @fucomp2(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
 ; CHECK-LABEL: fucomp2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fxch %st(1)
@@ -335,7 +335,7 @@ entry:
 
 define void @fucomp3(x86_fp80 %x, x86_fp80 %y) nounwind ssp {
 ; CHECK-LABEL: fucomp3:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fldt {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fxch %st(1)
@@ -353,7 +353,7 @@ entry:
 %complex = type { float, float }
 define float @sincos1(float %x) nounwind ssp {
 ; CHECK-LABEL: sincos1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    sincos
@@ -370,7 +370,7 @@ entry:
 ; Same thing, swapped output operands.
 define float @sincos2(float %x) nounwind ssp {
 ; CHECK-LABEL: sincos2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    sincos
@@ -391,7 +391,7 @@ entry:
 ; Discard both results.
 define float @sincos3(float %x) nounwind ssp {
 ; CHECK-LABEL: sincos3:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    fld %st(0)
 ; CHECK-NEXT:    ## InlineAsm Start
@@ -416,7 +416,7 @@ entry:
 ; Pass the same value in two fixed stack slots.
 define i32 @PR10602() nounwind ssp {
 ; CHECK-LABEL: PR10602:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    flds LCPI19_0
 ; CHECK-NEXT:    fld %st(0)
 ; CHECK-NEXT:    fxch %st(1)
@@ -450,13 +450,13 @@ entry:
 ; Function Attrs: ssp
 define void @test_live_st(i32 %a1) {
 ; CHECK-LABEL: test_live_st:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    fldt (%eax)
 ; CHECK-NEXT:    cmpl $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    jne LBB20_2
-; CHECK-NEXT:  ## BB#1: ## %sw.bb4.i
+; CHECK-NEXT:  ## %bb.1: ## %sw.bb4.i
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    frndint
 ; CHECK-NEXT:    ## InlineAsm End
@@ -502,7 +502,7 @@ return:
 ; Check that x87 stackifier is correctly rewriting FP registers to ST registers.
 define double @test_operand_rewrite() {
 ; CHECK-LABEL: test_operand_rewrite:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    ## InlineAsm Start
 ; CHECK-NEXT:    foo %st(0), %st(1)
 ; CHECK-NEXT:    ## InlineAsm End

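Several of the fucomp/sincos cases above are really about operand placement on
the eight-slot x87 register stack: values are pushed with fld*, and the
stackifier must present them to the inline asm in the %st(i) slots its
constraints demand. Annotating the sequence that recurs in fucomp1-fucomp3
(the annotations are an editorial reading, not taken from the test file):

    fldt {{[0-9]+}}(%esp)   ## push first operand  -> %st(0)
    fldt {{[0-9]+}}(%esp)   ## push second operand -> %st(0); first now %st(1)
    fxch %st(1)             ## swap %st(0)/%st(1) so the asm sees the operands
                            ## in the order its constraints require
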
Modified: llvm/trunk/test/CodeGen/X86/inline-sse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-sse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-sse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/inline-sse.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @nop() nounwind {
 ; X32-LABEL: nop:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
@@ -20,7 +20,7 @@ define void @nop() nounwind {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: nop:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    #APP
 ; X64-NEXT:    #NO_APP
 ; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)

Modified: llvm/trunk/test/CodeGen/X86/insert-into-constant-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insert-into-constant-vector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insert-into-constant-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insert-into-constant-vector.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define <16 x i8> @elt0_v16i8(i8 %x) {
 ; X32SSE2-LABEL: elt0_v16i8:
-; X32SSE2:       # BB#0:
+; X32SSE2:       # %bb.0:
 ; X32SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32SSE2-NEXT:    movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; X32SSE2-NEXT:    andnps %xmm1, %xmm0
@@ -18,7 +18,7 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
 ; X32SSE2-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt0_v16i8:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movd %edi, %xmm1
 ; X64SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; X64SSE2-NEXT:    pandn %xmm1, %xmm0
@@ -26,25 +26,25 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
 ; X64SSE2-NEXT:    retq
 ;
 ; X32SSE4-LABEL: elt0_v16i8:
-; X32SSE4:       # BB#0:
+; X32SSE4:       # %bb.0:
 ; X32SSE4-NEXT:    movdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
 ; X32SSE4-NEXT:    pinsrb $0, {{[0-9]+}}(%esp), %xmm0
 ; X32SSE4-NEXT:    retl
 ;
 ; X64SSE4-LABEL: elt0_v16i8:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
 ; X64SSE4-NEXT:    pinsrb $0, %edi, %xmm0
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt0_v16i8:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
 ; X32AVX-NEXT:    vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt0_v16i8:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
 ; X64AVX-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm0
 ; X64AVX-NEXT:    retq
@@ -54,25 +54,25 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
 
 define <8 x i16> @elt5_v8i16(i16 %x) {
 ; X32SSE-LABEL: elt5_v8i16:
-; X32SSE:       # BB#0:
+; X32SSE:       # %bb.0:
 ; X32SSE-NEXT:    movdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
 ; X32SSE-NEXT:    pinsrw $5, {{[0-9]+}}(%esp), %xmm0
 ; X32SSE-NEXT:    retl
 ;
 ; X64SSE-LABEL: elt5_v8i16:
-; X64SSE:       # BB#0:
+; X64SSE:       # %bb.0:
 ; X64SSE-NEXT:    movdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
 ; X64SSE-NEXT:    pinsrw $5, %edi, %xmm0
 ; X64SSE-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt5_v8i16:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
 ; X32AVX-NEXT:    vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt5_v8i16:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <42,1,2,3,4,u,6,7>
 ; X64AVX-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
 ; X64AVX-NEXT:    retq
@@ -82,7 +82,7 @@ define <8 x i16> @elt5_v8i16(i16 %x) {
 
 define <4 x i32> @elt3_v4i32(i32 %x) {
 ; X32SSE2-LABEL: elt3_v4i32:
-; X32SSE2:       # BB#0:
+; X32SSE2:       # %bb.0:
 ; X32SSE2-NEXT:    movaps {{.*#+}} xmm0 = <42,1,2,u>
 ; X32SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
@@ -90,7 +90,7 @@ define <4 x i32> @elt3_v4i32(i32 %x) {
 ; X32SSE2-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt3_v4i32:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movd %edi, %xmm1
 ; X64SSE2-NEXT:    movaps {{.*#+}} xmm0 = <42,1,2,u>
 ; X64SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
@@ -98,25 +98,25 @@ define <4 x i32> @elt3_v4i32(i32 %x) {
 ; X64SSE2-NEXT:    retq
 ;
 ; X32SSE4-LABEL: elt3_v4i32:
-; X32SSE4:       # BB#0:
+; X32SSE4:       # %bb.0:
 ; X32SSE4-NEXT:    movdqa {{.*#+}} xmm0 = <42,1,2,u>
 ; X32SSE4-NEXT:    pinsrd $3, {{[0-9]+}}(%esp), %xmm0
 ; X32SSE4-NEXT:    retl
 ;
 ; X64SSE4-LABEL: elt3_v4i32:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movdqa {{.*#+}} xmm0 = <42,1,2,u>
 ; X64SSE4-NEXT:    pinsrd $3, %edi, %xmm0
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt3_v4i32:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <42,1,2,u>
 ; X32AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt3_v4i32:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <42,1,2,u>
 ; X64AVX-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
 ; X64AVX-NEXT:    retq
@@ -126,7 +126,7 @@ define <4 x i32> @elt3_v4i32(i32 %x) {
 
 define <2 x i64> @elt0_v2i64(i64 %x) {
 ; X32SSE-LABEL: elt0_v2i64:
-; X32SSE:       # BB#0:
+; X32SSE:       # %bb.0:
 ; X32SSE-NEXT:    movl $1, %eax
 ; X32SSE-NEXT:    movd %eax, %xmm1
 ; X32SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
@@ -134,20 +134,20 @@ define <2 x i64> @elt0_v2i64(i64 %x) {
 ; X32SSE-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt0_v2i64:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movq %rdi, %xmm1
 ; X64SSE2-NEXT:    movapd {{.*#+}} xmm0 = <u,1>
 ; X64SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X64SSE2-NEXT:    retq
 ;
 ; X64SSE4-LABEL: elt0_v2i64:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movdqa {{.*#+}} xmm0 = <u,1>
 ; X64SSE4-NEXT:    pinsrq $0, %rdi, %xmm0
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt0_v2i64:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    movl $1, %eax
 ; X32AVX-NEXT:    vmovd %eax, %xmm0
 ; X32AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -155,7 +155,7 @@ define <2 x i64> @elt0_v2i64(i64 %x) {
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt0_v2i64:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,1>
 ; X64AVX-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm0
 ; X64AVX-NEXT:    retq
@@ -165,7 +165,7 @@ define <2 x i64> @elt0_v2i64(i64 %x) {
 
 define <4 x float> @elt1_v4f32(float %x) {
 ; X32SSE2-LABEL: elt1_v4f32:
-; X32SSE2:       # BB#0:
+; X32SSE2:       # %bb.0:
 ; X32SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32SSE2-NEXT:    movaps {{.*#+}} xmm1 = <42,u,2,3>
 ; X32SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
@@ -173,33 +173,33 @@ define <4 x float> @elt1_v4f32(float %x)
 ; X32SSE2-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt1_v4f32:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movaps {{.*#+}} xmm1 = <42,u,2,3>
 ; X64SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
 ; X64SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
 ; X64SSE2-NEXT:    retq
 ;
 ; X32SSE4-LABEL: elt1_v4f32:
-; X32SSE4:       # BB#0:
+; X32SSE4:       # %bb.0:
 ; X32SSE4-NEXT:    movaps {{.*#+}} xmm0 = <42,u,2,3>
 ; X32SSE4-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; X32SSE4-NEXT:    retl
 ;
 ; X64SSE4-LABEL: elt1_v4f32:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movaps {{.*#+}} xmm1 = <42,u,2,3>
 ; X64SSE4-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
 ; X64SSE4-NEXT:    movaps %xmm1, %xmm0
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt1_v4f32:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovaps {{.*#+}} xmm0 = <42,u,2,3>
 ; X32AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt1_v4f32:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovaps {{.*#+}} xmm1 = <42,u,2,3>
 ; X64AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
 ; X64AVX-NEXT:    retq
@@ -209,26 +209,26 @@ define <4 x float> @elt1_v4f32(float %x)
 
 define <2 x double> @elt1_v2f64(double %x) {
 ; X32SSE-LABEL: elt1_v2f64:
-; X32SSE:       # BB#0:
+; X32SSE:       # %bb.0:
 ; X32SSE-NEXT:    movapd {{.*#+}} xmm0 = <42,u>
 ; X32SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; X32SSE-NEXT:    retl
 ;
 ; X64SSE-LABEL: elt1_v2f64:
-; X64SSE:       # BB#0:
+; X64SSE:       # %bb.0:
 ; X64SSE-NEXT:    movaps {{.*#+}} xmm1 = <42,u>
 ; X64SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; X64SSE-NEXT:    movaps %xmm1, %xmm0
 ; X64SSE-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt1_v2f64:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovapd {{.*#+}} xmm0 = <42,u>
 ; X32AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt1_v2f64:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovaps {{.*#+}} xmm1 = <42,u>
 ; X64AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64AVX-NEXT:    retq
@@ -238,7 +238,7 @@ define <2 x double> @elt1_v2f64(double %
 
 define <8 x i32> @elt7_v8i32(i32 %x) {
 ; X32SSE2-LABEL: elt7_v8i32:
-; X32SSE2:       # BB#0:
+; X32SSE2:       # %bb.0:
 ; X32SSE2-NEXT:    movaps {{.*#+}} xmm1 = <4,5,6,u>
 ; X32SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
@@ -247,7 +247,7 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
 ; X32SSE2-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt7_v8i32:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movd %edi, %xmm0
 ; X64SSE2-NEXT:    movaps {{.*#+}} xmm1 = <4,5,6,u>
 ; X64SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
@@ -256,21 +256,21 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
 ; X64SSE2-NEXT:    retq
 ;
 ; X32SSE4-LABEL: elt7_v8i32:
-; X32SSE4:       # BB#0:
+; X32SSE4:       # %bb.0:
 ; X32SSE4-NEXT:    movdqa {{.*#+}} xmm1 = <4,5,6,u>
 ; X32SSE4-NEXT:    pinsrd $3, {{[0-9]+}}(%esp), %xmm1
 ; X32SSE4-NEXT:    movaps {{.*#+}} xmm0 = [42,1,2,3]
 ; X32SSE4-NEXT:    retl
 ;
 ; X64SSE4-LABEL: elt7_v8i32:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movdqa {{.*#+}} xmm1 = <4,5,6,u>
 ; X64SSE4-NEXT:    pinsrd $3, %edi, %xmm1
 ; X64SSE4-NEXT:    movaps {{.*#+}} xmm0 = [42,1,2,3]
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt7_v8i32:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = <42,1,2,3,4,5,6,u>
 ; X32AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X32AVX-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
@@ -278,7 +278,7 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt7_v8i32:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = <42,1,2,3,4,5,6,u>
 ; X64AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64AVX-NEXT:    vpinsrd $3, %edi, %xmm1, %xmm1
@@ -290,7 +290,7 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
 
 define <8 x float> @elt6_v8f32(float %x) {
 ; X32SSE2-LABEL: elt6_v8f32:
-; X32SSE2:       # BB#0:
+; X32SSE2:       # %bb.0:
 ; X32SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32SSE2-NEXT:    movaps {{.*#+}} xmm1 = <4,5,u,7>
 ; X32SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
@@ -299,7 +299,7 @@ define <8 x float> @elt6_v8f32(float %x)
 ; X32SSE2-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt6_v8f32:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movaps {{.*#+}} xmm1 = <4,5,u,7>
 ; X64SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
 ; X64SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
@@ -307,21 +307,21 @@ define <8 x float> @elt6_v8f32(float %x)
 ; X64SSE2-NEXT:    retq
 ;
 ; X32SSE4-LABEL: elt6_v8f32:
-; X32SSE4:       # BB#0:
+; X32SSE4:       # %bb.0:
 ; X32SSE4-NEXT:    movaps {{.*#+}} xmm1 = <4,5,u,7>
 ; X32SSE4-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
 ; X32SSE4-NEXT:    movaps {{.*#+}} xmm0 = [4.200000e+01,1.000000e+00,2.000000e+00,3.000000e+00]
 ; X32SSE4-NEXT:    retl
 ;
 ; X64SSE4-LABEL: elt6_v8f32:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movaps {{.*#+}} xmm1 = <4,5,u,7>
 ; X64SSE4-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
 ; X64SSE4-NEXT:    movaps {{.*#+}} xmm0 = [4.200000e+01,1.000000e+00,2.000000e+00,3.000000e+00]
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX-LABEL: elt6_v8f32:
-; X32AVX:       # BB#0:
+; X32AVX:       # %bb.0:
 ; X32AVX-NEXT:    vmovaps {{.*#+}} ymm0 = <42,1,2,3,4,5,u,7>
 ; X32AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
@@ -329,7 +329,7 @@ define <8 x float> @elt6_v8f32(float %x)
 ; X32AVX-NEXT:    retl
 ;
 ; X64AVX-LABEL: elt6_v8f32:
-; X64AVX:       # BB#0:
+; X64AVX:       # %bb.0:
 ; X64AVX-NEXT:    vmovaps {{.*#+}} ymm1 = <42,1,2,3,4,5,u,7>
 ; X64AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; X64AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1],xmm0[0],xmm2[3]
@@ -341,7 +341,7 @@ define <8 x float> @elt6_v8f32(float %x)
 
 define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X32SSE-LABEL: elt5_v8i64:
-; X32SSE:       # BB#0:
+; X32SSE:       # %bb.0:
 ; X32SSE-NEXT:    movl $4, %eax
 ; X32SSE-NEXT:    movd %eax, %xmm2
 ; X32SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
@@ -352,7 +352,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X32SSE-NEXT:    retl
 ;
 ; X64SSE2-LABEL: elt5_v8i64:
-; X64SSE2:       # BB#0:
+; X64SSE2:       # %bb.0:
 ; X64SSE2-NEXT:    movq %rdi, %xmm0
 ; X64SSE2-NEXT:    movdqa {{.*#+}} xmm2 = <4,u>
 ; X64SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
@@ -362,7 +362,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X64SSE2-NEXT:    retq
 ;
 ; X64SSE4-LABEL: elt5_v8i64:
-; X64SSE4:       # BB#0:
+; X64SSE4:       # %bb.0:
 ; X64SSE4-NEXT:    movdqa {{.*#+}} xmm2 = <4,u>
 ; X64SSE4-NEXT:    pinsrq $1, %rdi, %xmm2
 ; X64SSE4-NEXT:    movaps {{.*#+}} xmm0 = [42,1]
@@ -371,7 +371,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X64SSE4-NEXT:    retq
 ;
 ; X32AVX2-LABEL: elt5_v8i64:
-; X32AVX2:       # BB#0:
+; X32AVX2:       # %bb.0:
 ; X32AVX2-NEXT:    movl $4, %eax
 ; X32AVX2-NEXT:    vmovd %eax, %xmm0
 ; X32AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -381,7 +381,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X32AVX2-NEXT:    retl
 ;
 ; X64AVX2-LABEL: elt5_v8i64:
-; X64AVX2:       # BB#0:
+; X64AVX2:       # %bb.0:
 ; X64AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,u,6,7>
 ; X64AVX2-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm1
 ; X64AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -389,7 +389,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X64AVX2-NEXT:    retq
 ;
 ; X32AVX512F-LABEL: elt5_v8i64:
-; X32AVX512F:       # BB#0:
+; X32AVX512F:       # %bb.0:
 ; X32AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0]
 ; X32AVX512F-NEXT:    movl $4, %eax
 ; X32AVX512F-NEXT:    vmovd %eax, %xmm1
@@ -400,7 +400,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ; X32AVX512F-NEXT:    retl
 ;
 ; X64AVX512F-LABEL: elt5_v8i64:
-; X64AVX512F:       # BB#0:
+; X64AVX512F:       # %bb.0:
 ; X64AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <42,1,2,3,4,u,6,7>
 ; X64AVX512F-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
 ; X64AVX512F-NEXT:    vpinsrq $1, %rdi, %xmm1, %xmm1
@@ -412,7 +412,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 
 define <8 x double> @elt1_v8f64(double %x) {
 ; X32SSE-LABEL: elt1_v8f64:
-; X32SSE:       # BB#0:
+; X32SSE:       # %bb.0:
 ; X32SSE-NEXT:    movapd {{.*#+}} xmm0 = <42,u>
 ; X32SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; X32SSE-NEXT:    movaps {{.*#+}} xmm1 = [2.000000e+00,3.000000e+00]
@@ -421,7 +421,7 @@ define <8 x double> @elt1_v8f64(double %
 ; X32SSE-NEXT:    retl
 ;
 ; X64SSE-LABEL: elt1_v8f64:
-; X64SSE:       # BB#0:
+; X64SSE:       # %bb.0:
 ; X64SSE-NEXT:    movaps {{.*#+}} xmm4 = <42,u>
 ; X64SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
 ; X64SSE-NEXT:    movaps {{.*#+}} xmm1 = [2.000000e+00,3.000000e+00]
@@ -431,7 +431,7 @@ define <8 x double> @elt1_v8f64(double %
 ; X64SSE-NEXT:    retq
 ;
 ; X32AVX2-LABEL: elt1_v8f64:
-; X32AVX2:       # BB#0:
+; X32AVX2:       # %bb.0:
 ; X32AVX2-NEXT:    vmovapd {{.*#+}} ymm0 = <42,u,2,3>
 ; X32AVX2-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm0[0],mem[0]
 ; X32AVX2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
@@ -439,7 +439,7 @@ define <8 x double> @elt1_v8f64(double %
 ; X32AVX2-NEXT:    retl
 ;
 ; X64AVX2-LABEL: elt1_v8f64:
-; X64AVX2:       # BB#0:
+; X64AVX2:       # %bb.0:
 ; X64AVX2-NEXT:    vmovapd {{.*#+}} ymm1 = <42,u,2,3>
 ; X64AVX2-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64AVX2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
@@ -447,14 +447,14 @@ define <8 x double> @elt1_v8f64(double %
 ; X64AVX2-NEXT:    retq
 ;
 ; X32AVX512F-LABEL: elt1_v8f64:
-; X32AVX512F:       # BB#0:
+; X32AVX512F:       # %bb.0:
 ; X32AVX512F-NEXT:    vmovapd {{.*#+}} zmm0 = <42,u,2,3,4,5,6,7>
 ; X32AVX512F-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm0[0],mem[0]
 ; X32AVX512F-NEXT:    vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
 ; X32AVX512F-NEXT:    retl
 ;
 ; X64AVX512F-LABEL: elt1_v8f64:
-; X64AVX512F:       # BB#0:
+; X64AVX512F:       # %bb.0:
 ; X64AVX512F-NEXT:    vmovaps {{.*#+}} zmm1 = <42,u,2,3,4,5,6,7>
 ; X64AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64AVX512F-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/insertelement-duplicates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-duplicates.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-duplicates.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-duplicates.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %dest) nounwind noinline {
 ; SSE-32-LABEL: PR15298:
-; SSE-32:       # BB#0: # %L.entry
+; SSE-32:       # %bb.0: # %L.entry
 ; SSE-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; SSE-32-NEXT:    movaps 304(%ecx), %xmm0
@@ -18,7 +18,7 @@ define void @PR15298(<4 x float>* nocapt
 ; SSE-32-NEXT:    retl
 ;
 ; SSE-64-LABEL: PR15298:
-; SSE-64:       # BB#0: # %L.entry
+; SSE-64:       # %bb.0: # %L.entry
 ; SSE-64-NEXT:    movaps 304(%rdi), %xmm0
 ; SSE-64-NEXT:    xorps %xmm1, %xmm1
 ; SSE-64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,1]
@@ -28,7 +28,7 @@ define void @PR15298(<4 x float>* nocapt
 ; SSE-64-NEXT:    retq
 ;
 ; AVX-32-LABEL: PR15298:
-; AVX-32:       # BB#0: # %L.entry
+; AVX-32:       # %bb.0: # %L.entry
 ; AVX-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; AVX-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; AVX-32-NEXT:    vbroadcastss 304(%ecx), %xmm0
@@ -39,7 +39,7 @@ define void @PR15298(<4 x float>* nocapt
 ; AVX-32-NEXT:    retl
 ;
 ; AVX-64-LABEL: PR15298:
-; AVX-64:       # BB#0: # %L.entry
+; AVX-64:       # %bb.0: # %L.entry
 ; AVX-64-NEXT:    vbroadcastss 304(%rdi), %xmm0
 ; AVX-64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6,7]

Modified: llvm/trunk/test/CodeGen/X86/insertelement-ones.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-ones.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-ones.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-ones.ll Mon Dec  4 09:18:51 2017
@@ -10,40 +10,40 @@
 
 define <2 x i64> @insert_v2i64_x1(<2 x i64> %a) {
 ; SSE2-LABEL: insert_v2i64_x1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v2i64_x1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v2i64_x1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v2i64_x1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v2i64_x1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v2i64_x1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_v2i64_x1:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512-NEXT:    retq
@@ -53,41 +53,41 @@ define <2 x i64> @insert_v2i64_x1(<2 x i
 
 define <4 x i64> @insert_v4i64_01x3(<4 x i64> %a) {
 ; SSE2-LABEL: insert_v4i64_01x3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v4i64_01x3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v4i64_01x3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movlpd {{.*#+}} xmm1 = mem[0],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v4i64_01x3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v4i64_01x3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v4i64_01x3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_v4i64_01x3:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX512-NEXT:    retq
@@ -97,7 +97,7 @@ define <4 x i64> @insert_v4i64_01x3(<4 x
 
 define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
 ; SSE2-LABEL: insert_v4i32_01x3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movl $-1, %eax
 ; SSE2-NEXT:    movd %eax, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
@@ -105,7 +105,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v4i32_01x3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movl $-1, %eax
 ; SSE3-NEXT:    movd %eax, %xmm1
 ; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
@@ -113,7 +113,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v4i32_01x3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movl $-1, %eax
 ; SSSE3-NEXT:    movd %eax, %xmm1
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
@@ -121,25 +121,25 @@ define <4 x i32> @insert_v4i32_01x3(<4 x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v4i32_01x3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v4i32_01x3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v4i32_01x3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_v4i32_01x3:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; AVX512-NEXT:    retq
@@ -149,7 +149,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x
 
 define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
 ; SSE2-LABEL: insert_v8i32_x12345x7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE2-NEXT:    movl $-1, %eax
@@ -159,7 +159,7 @@ define <8 x i32> @insert_v8i32_x12345x7(
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v8i32_x12345x7:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; SSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE3-NEXT:    movl $-1, %eax
@@ -169,7 +169,7 @@ define <8 x i32> @insert_v8i32_x12345x7(
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v8i32_x12345x7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSSE3-NEXT:    movl $-1, %eax
@@ -179,27 +179,27 @@ define <8 x i32> @insert_v8i32_x12345x7(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v8i32_x12345x7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v8i32_x12345x7:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v8i32_x12345x7:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_v8i32_x12345x7:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX512-NEXT:    retq
@@ -210,34 +210,34 @@ define <8 x i32> @insert_v8i32_x12345x7(
 
 define <8 x i16> @insert_v8i16_x12345x7(<8 x i16> %a) {
 ; SSE2-LABEL: insert_v8i16_x12345x7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v8i16_x12345x7:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v8i16_x12345x7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v8i16_x12345x7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v8i16_x12345x7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; AVX-NEXT:    retq
@@ -248,7 +248,7 @@ define <8 x i16> @insert_v8i16_x12345x7(
 
 define <16 x i16> @insert_v16i16_x12345x789ABCDEx(<16 x i16> %a) {
 ; SSE2-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
@@ -256,7 +256,7 @@ define <16 x i16> @insert_v16i16_x12345x
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
@@ -264,7 +264,7 @@ define <16 x i16> @insert_v16i16_x12345x
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movl $65535, %eax # imm = 0xFFFF
 ; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
@@ -272,14 +272,14 @@ define <16 x i16> @insert_v16i16_x12345x
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v16i16_x12345x789ABCDEx:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
@@ -289,7 +289,7 @@ define <16 x i16> @insert_v16i16_x12345x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
@@ -300,7 +300,7 @@ define <16 x i16> @insert_v16i16_x12345x
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
@@ -311,7 +311,7 @@ define <16 x i16> @insert_v16i16_x12345x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: insert_v16i16_x12345x789ABCDEx:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    movw $1, %ax
 ; AVX512VL-NEXT:    kmovd %eax, %k1
@@ -331,7 +331,7 @@ define <16 x i16> @insert_v16i16_x12345x
 
 define <16 x i8> @insert_v16i8_x123456789ABCDEx(<16 x i8> %a) {
 ; SSE2-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; SSE2-NEXT:    pand %xmm1, %xmm0
 ; SSE2-NEXT:    movl $255, %eax
@@ -344,7 +344,7 @@ define <16 x i8> @insert_v16i8_x12345678
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; SSE3-NEXT:    pand %xmm1, %xmm0
 ; SSE3-NEXT:    movl $255, %eax
@@ -357,7 +357,7 @@ define <16 x i8> @insert_v16i8_x12345678
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; SSSE3-NEXT:    movl $255, %eax
 ; SSSE3-NEXT:    movd %eax, %xmm1
@@ -370,14 +370,14 @@ define <16 x i8> @insert_v16i8_x12345678
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v16i8_x123456789ABCDEx:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movl $255, %eax
 ; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
 ; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v16i8_x123456789ABCDEx:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    movl $255, %eax
 ; AVX-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm0
 ; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
@@ -389,7 +389,7 @@ define <16 x i8> @insert_v16i8_x12345678
 
 define <32 x i8> @insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx(<32 x i8> %a) {
 ; SSE2-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    movl $255, %eax
@@ -411,7 +411,7 @@ define <32 x i8> @insert_v32i8_x12345678
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; SSE3-NEXT:    pand %xmm2, %xmm0
 ; SSE3-NEXT:    movl $255, %eax
@@ -433,7 +433,7 @@ define <32 x i8> @insert_v32i8_x12345678
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; SSSE3-NEXT:    movl $255, %eax
 ; SSSE3-NEXT:    movd %eax, %xmm2
@@ -453,7 +453,7 @@ define <32 x i8> @insert_v32i8_x12345678
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movl $255, %eax
 ; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
 ; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
@@ -462,7 +462,7 @@ define <32 x i8> @insert_v32i8_x12345678
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movl $255, %eax
 ; AVX1-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX1-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
@@ -474,7 +474,7 @@ define <32 x i8> @insert_v32i8_x12345678
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    movl $255, %eax
 ; AVX2-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX2-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
@@ -486,7 +486,7 @@ define <32 x i8> @insert_v32i8_x12345678
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: insert_v32i8_x123456789ABCDEzGHIJKLMNOPQRSTxx:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movl $255, %eax
 ; AVX512-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX512-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/insertelement-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-shuffle.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-shuffle.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounwind {
 ; X32_AVX256-LABEL: insert_subvector_256:
-; X32_AVX256:       # BB#0:
+; X32_AVX256:       # %bb.0:
 ; X32_AVX256-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32_AVX256-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
 ; X32_AVX256-NEXT:    vpbroadcastd %xmm1, %xmm1
@@ -14,7 +14,7 @@ define <8 x float> @insert_subvector_256
 ; X32_AVX256-NEXT:    retl
 ;
 ; X64_AVX256-LABEL: insert_subvector_256:
-; X64_AVX256:       # BB#0:
+; X64_AVX256:       # %bb.0:
 ; X64_AVX256-NEXT:    vmovd %edi, %xmm1
 ; X64_AVX256-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
 ; X64_AVX256-NEXT:    vpbroadcastd %xmm1, %xmm1
@@ -22,7 +22,7 @@ define <8 x float> @insert_subvector_256
 ; X64_AVX256-NEXT:    retq
 ;
 ; X32_AVX512-LABEL: insert_subvector_256:
-; X32_AVX512:       # BB#0:
+; X32_AVX512:       # %bb.0:
 ; X32_AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32_AVX512-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
 ; X32_AVX512-NEXT:    vpbroadcastd %xmm1, %xmm1
@@ -30,7 +30,7 @@ define <8 x float> @insert_subvector_256
 ; X32_AVX512-NEXT:    retl
 ;
 ; X64_AVX512-LABEL: insert_subvector_256:
-; X64_AVX512:       # BB#0:
+; X64_AVX512:       # %bb.0:
 ; X64_AVX512-NEXT:    vmovd %edi, %xmm1
 ; X64_AVX512-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
 ; X64_AVX512-NEXT:    vpbroadcastd %xmm1, %xmm1
@@ -45,7 +45,7 @@ define <8 x float> @insert_subvector_256
 
 define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind {
 ; X32_AVX256-LABEL: insert_subvector_512:
-; X32_AVX256:       # BB#0:
+; X32_AVX256:       # %bb.0:
 ; X32_AVX256-NEXT:    pushl %ebp
 ; X32_AVX256-NEXT:    movl %esp, %ebp
 ; X32_AVX256-NEXT:    andl $-8, %esp
@@ -61,7 +61,7 @@ define <8 x i64> @insert_subvector_512(i
 ; X32_AVX256-NEXT:    retl
 ;
 ; X64_AVX256-LABEL: insert_subvector_512:
-; X64_AVX256:       # BB#0:
+; X64_AVX256:       # %bb.0:
 ; X64_AVX256-NEXT:    vmovd %edi, %xmm2
 ; X64_AVX256-NEXT:    vpinsrd $1, %esi, %xmm2, %xmm2
 ; X64_AVX256-NEXT:    vmovq %xmm2, %rax
@@ -71,14 +71,14 @@ define <8 x i64> @insert_subvector_512(i
 ; X64_AVX256-NEXT:    retq
 ;
 ; X32_AVX512-LABEL: insert_subvector_512:
-; X32_AVX512:       # BB#0:
+; X32_AVX512:       # %bb.0:
 ; X32_AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; X32_AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,1,0,8,0,3,0,4,0,5,0,6,0,7,0]
 ; X32_AVX512-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
 ; X32_AVX512-NEXT:    retl
 ;
 ; X64_AVX512-LABEL: insert_subvector_512:
-; X64_AVX512:       # BB#0:
+; X64_AVX512:       # %bb.0:
 ; X64_AVX512-NEXT:    vmovd %edi, %xmm1
 ; X64_AVX512-NEXT:    vpinsrd $1, %esi, %xmm1, %xmm1
 ; X64_AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,1,8,3,4,5,6,7]
@@ -96,7 +96,7 @@ define <8 x i64> @insert_subvector_512(i
 
 define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind {
 ; X32_AVX256-LABEL: insert_subvector_into_undef:
-; X32_AVX256:       # BB#0:
+; X32_AVX256:       # %bb.0:
 ; X32_AVX256-NEXT:    pushl %ebp
 ; X32_AVX256-NEXT:    movl %esp, %ebp
 ; X32_AVX256-NEXT:    andl $-8, %esp
@@ -116,7 +116,7 @@ define <8 x i64> @insert_subvector_into_
 ; X32_AVX256-NEXT:    retl
 ;
 ; X64_AVX256-LABEL: insert_subvector_into_undef:
-; X64_AVX256:       # BB#0:
+; X64_AVX256:       # %bb.0:
 ; X64_AVX256-NEXT:    vmovd %edi, %xmm0
 ; X64_AVX256-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
 ; X64_AVX256-NEXT:    vpbroadcastq %xmm0, %ymm0
@@ -124,13 +124,13 @@ define <8 x i64> @insert_subvector_into_
 ; X64_AVX256-NEXT:    retq
 ;
 ; X32_AVX512-LABEL: insert_subvector_into_undef:
-; X32_AVX512:       # BB#0:
+; X32_AVX512:       # %bb.0:
 ; X32_AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32_AVX512-NEXT:    vbroadcastsd %xmm0, %zmm0
 ; X32_AVX512-NEXT:    retl
 ;
 ; X64_AVX512-LABEL: insert_subvector_into_undef:
-; X64_AVX512:       # BB#0:
+; X64_AVX512:       # %bb.0:
 ; X64_AVX512-NEXT:    vmovd %edi, %xmm0
 ; X64_AVX512-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
 ; X64_AVX512-NEXT:    vpbroadcastq %xmm0, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/insertelement-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-zero.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-zero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-zero.ll Mon Dec  4 09:18:51 2017
@@ -8,31 +8,31 @@
 
 define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
 ; SSE2-LABEL: insert_v2f64_z1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorpd %xmm1, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v2f64_z1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorpd %xmm1, %xmm1
 ; SSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v2f64_z1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorpd %xmm1, %xmm1
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v2f64_z1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorpd %xmm1, %xmm1
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v2f64_z1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
@@ -42,35 +42,35 @@ define <2 x double> @insert_v2f64_z1(<2
 
 define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
 ; SSE2-LABEL: insert_v4f64_0zz3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; SSE2-NEXT:    xorpd %xmm2, %xmm2
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v4f64_0zz3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; SSE3-NEXT:    xorpd %xmm2, %xmm2
 ; SSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v4f64_0zz3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; SSSE3-NEXT:    xorpd %xmm2, %xmm2
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v4f64_0zz3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; SSE41-NEXT:    xorpd %xmm2, %xmm2
 ; SSE41-NEXT:    blendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v4f64_0zz3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
 ; AVX-NEXT:    retq
@@ -81,37 +81,37 @@ define <4 x double> @insert_v4f64_0zz3(<
 
 define <2 x i64> @insert_v2i64_z1(<2 x i64> %a) {
 ; SSE2-LABEL: insert_v2i64_z1:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorpd %xmm1, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v2i64_z1:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorpd %xmm1, %xmm1
 ; SSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v2i64_z1:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorpd %xmm1, %xmm1
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v2i64_z1:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v2i64_z1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v2i64_z1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
@@ -121,37 +121,37 @@ define <2 x i64> @insert_v2i64_z1(<2 x i
 
 define <4 x i64> @insert_v4i64_01z3(<4 x i64> %a) {
 ; SSE2-LABEL: insert_v4i64_01z3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorpd %xmm2, %xmm2
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v4i64_01z3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorpd %xmm2, %xmm2
 ; SSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v4i64_01z3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorpd %xmm2, %xmm2
 ; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v4i64_01z3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v4i64_01z3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v4i64_01z3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-NEXT:    retq
@@ -161,34 +161,34 @@ define <4 x i64> @insert_v4i64_01z3(<4 x
 
 define <4 x float> @insert_v4f32_01z3(<4 x float> %a) {
 ; SSE2-LABEL: insert_v4f32_01z3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v4f32_01z3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v4f32_01z3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v4f32_01z3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v4f32_01z3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; AVX-NEXT:    retq
@@ -198,7 +198,7 @@ define <4 x float> @insert_v4f32_01z3(<4
 
 define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
 ; SSE2-LABEL: insert_v8f32_z12345z7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
@@ -206,7 +206,7 @@ define <8 x float> @insert_v8f32_z12345z
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v8f32_z12345z7:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
@@ -214,7 +214,7 @@ define <8 x float> @insert_v8f32_z12345z
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v8f32_z12345z7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
@@ -222,14 +222,14 @@ define <8 x float> @insert_v8f32_z12345z
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v8f32_z12345z7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v8f32_z12345z7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    retq
@@ -240,40 +240,40 @@ define <8 x float> @insert_v8f32_z12345z
 
 define <4 x i32> @insert_v4i32_01z3(<4 x i32> %a) {
 ; SSE2-LABEL: insert_v4i32_01z3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v4i32_01z3:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v4i32_01z3:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm1, %xmm1
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
 ; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v4i32_01z3:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v4i32_01z3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v4i32_01z3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; AVX2-NEXT:    retq
@@ -283,7 +283,7 @@ define <4 x i32> @insert_v4i32_01z3(<4 x
 
 define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
 ; SSE2-LABEL: insert_v8i32_z12345z7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
@@ -292,7 +292,7 @@ define <8 x i32> @insert_v8i32_z12345z7(
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v8i32_z12345z7:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE3-NEXT:    xorps %xmm2, %xmm2
@@ -301,7 +301,7 @@ define <8 x i32> @insert_v8i32_z12345z7(
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v8i32_z12345z7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorps %xmm2, %xmm2
 ; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSSE3-NEXT:    xorps %xmm2, %xmm2
@@ -310,14 +310,14 @@ define <8 x i32> @insert_v8i32_z12345z7(
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v8i32_z12345z7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v8i32_z12345z7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    retq
@@ -328,34 +328,34 @@ define <8 x i32> @insert_v8i32_z12345z7(
 
 define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
 ; SSE2-LABEL: insert_v8i16_z12345z7:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorl %eax, %eax
 ; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v8i16_z12345z7:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorl %eax, %eax
 ; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v8i16_z12345z7:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorl %eax, %eax
 ; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v8i16_z12345z7:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v8i16_z12345z7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; AVX-NEXT:    retq
@@ -366,7 +366,7 @@ define <8 x i16> @insert_v8i16_z12345z7(
 
 define <16 x i16> @insert_v16i16_z12345z789ABCDEz(<16 x i16> %a) {
 ; SSE2-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorl %eax, %eax
 ; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
@@ -374,7 +374,7 @@ define <16 x i16> @insert_v16i16_z12345z
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    xorl %eax, %eax
 ; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
@@ -382,7 +382,7 @@ define <16 x i16> @insert_v16i16_z12345z
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    xorl %eax, %eax
 ; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
 ; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
@@ -390,14 +390,14 @@ define <16 x i16> @insert_v16i16_z12345z
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v16i16_z12345z789ABCDEz:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pxor %xmm2, %xmm2
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
 ; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v16i16_z12345z789ABCDEz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %1 = insertelement <16 x i16> %a, i16 0, i32 0
@@ -408,29 +408,29 @@ define <16 x i16> @insert_v16i16_z12345z
 
 define <16 x i8> @insert_v16i8_z123456789ABCDEz(<16 x i8> %a) {
 ; SSE2-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v16i8_z123456789ABCDEz:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorl %eax, %eax
 ; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
 ; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v16i8_z123456789ABCDEz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    xorl %eax, %eax
 ; AVX-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm0
 ; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
@@ -442,25 +442,25 @@ define <16 x i8> @insert_v16i8_z12345678
 
 define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; SSE2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    retq
 ;
 ; SSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSE3:       # BB#0:
+; SSE3:       # %bb.0:
 ; SSE3-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE3-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSSE3:       # BB#0:
+; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm1
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    xorl %eax, %eax
 ; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
 ; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
@@ -469,7 +469,7 @@ define <32 x i8> @insert_v32i8_z12345678
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    xorl %eax, %eax
 ; AVX1-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX1-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
@@ -480,7 +480,7 @@ define <32 x i8> @insert_v32i8_z12345678
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    xorl %eax, %eax
 ; AVX2-NEXT:    vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX2-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/insertps-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertps-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertps-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertps-combine.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 
 define <4 x float> @shuffle_v4f32_0z27(<4 x float> %x, <4 x float> %a) {
 ; SSE-LABEL: shuffle_v4f32_0z27:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_0z27:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %x, i32 0
@@ -23,12 +23,12 @@ define <4 x float> @shuffle_v4f32_0z27(<
 
 define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %xyzw, <4 x float> %abcd) {
 ; SSE-LABEL: shuffle_v4f32_0zz4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_0zz4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %xyzw, i32 0
@@ -41,12 +41,12 @@ define <4 x float> @shuffle_v4f32_0zz4(<
 
 define <4 x float> @shuffle_v4f32_0z24(<4 x float> %xyzw, <4 x float> %abcd) {
 ; SSE-LABEL: shuffle_v4f32_0z24:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_0z24:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %xyzw, i32 0
@@ -59,12 +59,12 @@ define <4 x float> @shuffle_v4f32_0z24(<
 
 define <4 x float> @shuffle_v4f32_0zz0(float %a) {
 ; SSE-LABEL: shuffle_v4f32_0zz0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_0zz0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
 ; AVX-NEXT:    retq
   %vecinit = insertelement <4 x float> undef, float %a, i32 0
@@ -76,12 +76,12 @@ define <4 x float> @shuffle_v4f32_0zz0(f
 
 define <4 x float> @shuffle_v4f32_0z6z(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: shuffle_v4f32_0z6z:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_0z6z:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
 ; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 0
@@ -95,13 +95,13 @@ define <4 x float> @shuffle_v4f32_0z6z(<
 
 define <4 x float> @shuffle_v4f32_z06z(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: shuffle_v4f32_z06z:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_z06z:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[0],xmm1[2],zero
 ; AVX-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 undef, i32 0, i32 6, i32 undef>
@@ -111,12 +111,12 @@ define <4 x float> @shuffle_v4f32_z06z(<
 
 define <4 x float> @shuffle_v4f32_05zz(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: shuffle_v4f32_05zz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: shuffle_v4f32_05zz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
 ; AVX-NEXT:    retq
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
@@ -126,12 +126,12 @@ define <4 x float> @shuffle_v4f32_05zz(<
 
 define <4 x float> @insertps_undef_input0(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: insertps_undef_input0:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insertps_undef_input0:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm1[0],zero,zero
 ; AVX-NEXT:    retq
   %res0 = fadd <4 x float> %a0, <float 1.0, float 1.0, float 1.0, float 1.0>
@@ -142,13 +142,13 @@ define <4 x float> @insertps_undef_input
 
 define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: insertps_undef_input1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm1, %xmm1
 ; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insertps_undef_input1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; AVX-NEXT:    retq
@@ -160,7 +160,7 @@ define <4 x float> @insertps_undef_input
 
 define <4 x float> @insertps_zero_from_v2f64(<4 x float> %a0, <2 x double>* %a1) nounwind {
 ; SSE-LABEL: insertps_zero_from_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movapd (%rdi), %xmm1
 ; SSE-NEXT:    addpd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -168,7 +168,7 @@ define <4 x float> @insertps_zero_from_v
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insertps_zero_from_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovapd (%rdi), %xmm1
 ; AVX-NEXT:    vaddpd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -184,7 +184,7 @@ define <4 x float> @insertps_zero_from_v
 
 define <4 x float> @insertps_zero_from_v2i64(<4 x float> %a0, <2 x i64>* %a1) nounwind {
 ; SSE-LABEL: insertps_zero_from_v2i64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa (%rdi), %xmm1
 ; SSE-NEXT:    paddq {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -192,7 +192,7 @@ define <4 x float> @insertps_zero_from_v
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insertps_zero_from_v2i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX-NEXT:    vpaddq {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -208,7 +208,7 @@ define <4 x float> @insertps_zero_from_v
 
 define <4 x float> @insertps_zero_from_v8i16(<4 x float> %a0, <8 x i16>* %a1) nounwind {
 ; SSE-LABEL: insertps_zero_from_v8i16:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa (%rdi), %xmm1
 ; SSE-NEXT:    paddw {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -216,7 +216,7 @@ define <4 x float> @insertps_zero_from_v
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insertps_zero_from_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
@@ -232,12 +232,12 @@ define <4 x float> @insertps_zero_from_v
 
 define <4 x float> @consecutive_load_insertps_04zz(float* %p) {
 ; SSE-LABEL: consecutive_load_insertps_04zz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: consecutive_load_insertps_04zz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
   %p0 = getelementptr inbounds float, float* %p, i64 1
@@ -252,12 +252,12 @@ define <4 x float> @consecutive_load_ins
 
 define float @extract_zero_insertps_z0z7(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: extract_zero_insertps_z0z7:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extract_zero_insertps_z0z7:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 21)
@@ -267,12 +267,12 @@ define float @extract_zero_insertps_z0z7
 
 define float @extract_lane_insertps_5123(<4 x float> %a0, <4 x float> *%p1) {
 ; SSE-LABEL: extract_lane_insertps_5123:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extract_lane_insertps_5123:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
   %a1 = load <4 x float>, <4 x float> *%p1

Modified: llvm/trunk/test/CodeGen/X86/insertps-from-constantpool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertps-from-constantpool.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertps-from-constantpool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertps-from-constantpool.ll Mon Dec  4 09:18:51 2017
@@ -5,12 +5,12 @@
 
 define <4 x float> @fold_from_constantpool(<4 x float> %a) {
 ; X32-LABEL: fold_from_constantpool:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: fold_from_constantpool:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; X64-NEXT:    retq
   %1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> <float 0.0, float 1.0, float 0.0, float 0.0>, i8 64)

Modified: llvm/trunk/test/CodeGen/X86/insertps-unfold-load-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertps-unfold-load-bug.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertps-unfold-load-bug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertps-unfold-load-bug.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <4 x float> @insertps_unfold(<4 x float>* %v0, <4 x float>* %v1) {
 ; X32-LABEL: insertps_unfold:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -16,7 +16,7 @@ define <4 x float> @insertps_unfold(<4 x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: insertps_unfold:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X64-NEXT:    movaps (%rdi), %xmm0
 ; X64-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]

Modified: llvm/trunk/test/CodeGen/X86/jump_sign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/jump_sign.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/jump_sign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/jump_sign.ll Mon Dec  4 09:18:51 2017
@@ -3,11 +3,11 @@
 
 define i32 @func_f(i32 %X) {
 ; CHECK-LABEL: func_f:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    incl %eax
 ; CHECK-NEXT:    jns .LBB0_2
-; CHECK-NEXT:  # BB#1: # %cond_true
+; CHECK-NEXT:  # %bb.1: # %cond_true
 ; CHECK-NEXT:    calll bar
 ; CHECK-NEXT:  .LBB0_2: # %cond_next
 ; CHECK-NEXT:    jmp baz # TAILCALL
@@ -32,7 +32,7 @@ declare i32 @baz(...)
 ; rdar://11355268
 define i32 @func_g(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_g:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    subl {{[0-9]+}}(%esp), %eax
@@ -47,7 +47,7 @@ define i32 @func_g(i32 %a, i32 %b) nounw
 ; rdar://10734411
 define i32 @func_h(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_h:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %edx, %edx
@@ -62,7 +62,7 @@ define i32 @func_h(i32 %a, i32 %b) nounw
 
 define i32 @func_i(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_i:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    subl {{[0-9]+}}(%esp), %eax
@@ -76,7 +76,7 @@ define i32 @func_i(i32 %a, i32 %b) nounw
 
 define i32 @func_j(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_j:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    subl {{[0-9]+}}(%esp), %eax
@@ -90,7 +90,7 @@ define i32 @func_j(i32 %a, i32 %b) nounw
 
 define i32 @func_k(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_k:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %edx, %edx
@@ -106,7 +106,7 @@ define i32 @func_k(i32 %a, i32 %b) nounw
 ; redundant cmp instruction
 define i32 @func_l(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_l:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movl %edx, %eax
@@ -121,7 +121,7 @@ define i32 @func_l(i32 %a, i32 %b) nounw
 
 define i32 @func_m(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_m:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    subl %ecx, %eax
@@ -137,14 +137,14 @@ define i32 @func_m(i32 %a, i32 %b) nounw
 ; a swapped sub.
 define i32 @func_l2(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_l2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    subl %edx, %ecx
 ; CHECK-NEXT:    cmpl %eax, %edx
 ; CHECK-NEXT:    jne .LBB8_2
-; CHECK-NEXT:  # BB#1: # %if.then
+; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    cmovgl %ecx, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB8_2: # %if.else
@@ -165,12 +165,12 @@ if.else:
 
 define i32 @func_l3(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_l3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    subl %ecx, %eax
 ; CHECK-NEXT:    jge .LBB9_2
-; CHECK-NEXT:  # BB#1: # %if.then
+; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB9_2: # %if.else
 ; CHECK-NEXT:    incl %eax
@@ -191,7 +191,7 @@ if.else:
 ; When Movr0 is between sub and cmp, we need to move "Movr0" before sub.
 define i32 @func_l4(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_l4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %edx, %edx
@@ -207,7 +207,7 @@ define i32 @func_l4(i32 %a, i32 %b) noun
 ; rdar://11540023
 define i32 @func_n(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: func_n:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    cmpl %ecx, %eax
@@ -222,19 +222,19 @@ define i32 @func_n(i32 %x, i32 %y) nounw
 ; PR://13046
 define void @func_o() nounwind uwtable {
 ; CHECK-LABEL: func_o:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je .LBB12_1
-; CHECK-NEXT:  # BB#2: # %if.end.i
+; CHECK-NEXT:  # %bb.2: # %if.end.i
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB12_5
-; CHECK-NEXT:  # BB#3: # %sw.bb
+; CHECK-NEXT:  # %bb.3: # %sw.bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB12_8
-; CHECK-NEXT:  # BB#4: # %if.end29
+; CHECK-NEXT:  # %bb.4: # %if.end29
 ; CHECK-NEXT:    movzwl (%eax), %eax
 ; CHECK-NEXT:    movzwl %ax, %eax
 ; CHECK-NEXT:    imull $52429, %eax, %ecx # imm = 0xCCCD
@@ -247,13 +247,13 @@ define void @func_o() nounwind uwtable {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je .LBB12_9
-; CHECK-NEXT:  # BB#10: # %if.else.i104
+; CHECK-NEXT:  # %bb.10: # %if.else.i104
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB12_5: # %sw.default
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB12_7
-; CHECK-NEXT:  # BB#6: # %if.then.i96
+; CHECK-NEXT:  # %bb.6: # %if.then.i96
 ; CHECK-NEXT:  .LBB12_1: # %if.then.i
 ; CHECK-NEXT:  .LBB12_9: # %if.then.i103
 ; CHECK-NEXT:  .LBB12_7: # %if.else.i97
@@ -299,7 +299,7 @@ if.else.i104:
 ; rdar://11855129
 define i32 @func_p(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: func_p:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
@@ -316,7 +316,7 @@ define i32 @func_p(i32 %a, i32 %b) nounw
 ; by sbb, we should not optimize cmp away.
 define i32 @func_q(i32 %a0, i32 %a1, i32 %a2) {
 ; CHECK-LABEL: func_q:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl %ecx, %edx
@@ -335,13 +335,13 @@ define i32 @func_q(i32 %a0, i32 %a1, i32
 ; rdar://11873276
 define i8* @func_r(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
 ; CHECK-LABEL: func_r:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movl (%edx), %ecx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    subl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    jl .LBB15_2
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl %ecx, (%edx)
 ; CHECK-NEXT:    addl %ecx, %eax
@@ -366,7 +366,7 @@ return:
 ; Test optimizations of dec/inc.
 define i32 @func_dec(i32 %a) nounwind {
 ; CHECK-LABEL: func_dec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    decl %eax
@@ -380,7 +380,7 @@ define i32 @func_dec(i32 %a) nounwind {
 
 define i32 @func_inc(i32 %a) nounwind {
 ; CHECK-LABEL: func_inc:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    incl %eax
@@ -397,7 +397,7 @@ define i32 @func_inc(i32 %a) nounwind {
 @a = common global i32 0, align 4
 define i32 @func_test1(i32 %p1) nounwind uwtable {
 ; CHECK-LABEL: func_test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl b, %eax
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    setb %cl
@@ -405,7 +405,7 @@ define i32 @func_test1(i32 %p1) nounwind
 ; CHECK-NEXT:    movl %eax, %edx
 ; CHECK-NEXT:    andb %cl, %dl
 ; CHECK-NEXT:    je .LBB18_2
-; CHECK-NEXT:  # BB#1: # %if.then
+; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    decl %eax
 ; CHECK-NEXT:    movl %eax, a
 ; CHECK-NEXT:  .LBB18_2: # %if.end

Modified: llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-bits-vector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-bits-vector.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_extract_sext:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpextrw $0, %xmm0, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_extract_sext:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpextrw $0, %xmm0, %eax
 ; X64-NEXT:    retq
@@ -22,7 +22,7 @@ define i32 @knownbits_mask_extract_sext(
 
 define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_extract_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
@@ -34,7 +34,7 @@ define float @knownbits_mask_extract_uit
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_extract_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
 ; X64-NEXT:    vmovq %xmm0, %rax
@@ -48,7 +48,7 @@ define float @knownbits_mask_extract_uit
 
 define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nounwind {
 ; X32-LABEL: knownbits_insert_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
@@ -58,7 +58,7 @@ define <4 x float> @knownbits_insert_uit
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_insert_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl %di, %eax
 ; X64-NEXT:    movzwl %si, %ecx
 ; X64-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
@@ -77,14 +77,14 @@ define <4 x float> @knownbits_insert_uit
 
 define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_shuffle_sext:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X32-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_shuffle_sext:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -97,14 +97,14 @@ define <4 x i32> @knownbits_mask_shuffle
 
 define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_shuffle_shuffle_sext:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X32-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_shuffle_shuffle_sext:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -118,14 +118,14 @@ define <4 x i32> @knownbits_mask_shuffle
 
 define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_shuffle_shuffle_undef_sext:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X32-NEXT:    vpmovsxwd %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_shuffle_shuffle_undef_sext:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-NEXT:    vpmovsxwd %xmm0, %xmm0
@@ -139,14 +139,14 @@ define <4 x i32> @knownbits_mask_shuffle
 
 define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
@@ -159,7 +159,7 @@ define <4 x float> @knownbits_mask_shuff
 
 define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_or_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vorps {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -167,7 +167,7 @@ define <4 x float> @knownbits_mask_or_sh
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_or_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vorps {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -182,7 +182,7 @@ define <4 x float> @knownbits_mask_or_sh
 
 define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vxorps {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -190,7 +190,7 @@ define <4 x float> @knownbits_mask_xor_s
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_xor_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vxorps {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -205,12 +205,12 @@ define <4 x float> @knownbits_mask_xor_s
 
 define <4 x i32> @knownbits_mask_shl_shuffle_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_shl_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_shl_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
@@ -222,12 +222,12 @@ define <4 x i32> @knownbits_mask_shl_shu
 
 define <4 x i32> @knownbits_mask_ashr_shuffle_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_ashr_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_ashr_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 131071, i32 -1, i32 -1, i32 131071>
@@ -239,12 +239,12 @@ define <4 x i32> @knownbits_mask_ashr_sh
 
 define <4 x i32> @knownbits_mask_mul_shuffle_shl(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_mul_shuffle_shl:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_mul_shuffle_shl:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
@@ -256,12 +256,12 @@ define <4 x i32> @knownbits_mask_mul_shu
 
 define <4 x i32> @knownbits_mask_trunc_shuffle_shl(<4 x i64> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_trunc_shuffle_shl:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_trunc_shuffle_shl:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i64> %a0, <i64 -65536, i64 -7, i64 7, i64 -65536>
@@ -273,12 +273,12 @@ define <4 x i32> @knownbits_mask_trunc_s
 
 define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_add_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_add_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -291,12 +291,12 @@ define <4 x i32> @knownbits_mask_add_shu
 
 define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_sub_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_sub_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 15, i32 -1, i32 -1, i32 15>
@@ -308,12 +308,12 @@ define <4 x i32> @knownbits_mask_sub_shu
 
 define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_udiv_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_udiv_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -325,12 +325,12 @@ define <4 x i32> @knownbits_mask_udiv_sh
 
 define <4 x i32> @knownbits_urem_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_urem_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_urem_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = urem <4 x i32> %a0, <i32 16, i32 16, i32 16, i32 16>
@@ -340,12 +340,12 @@ define <4 x i32> @knownbits_urem_lshr(<4
 
 define <4 x i32> @knownbits_mask_urem_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_urem_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_urem_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -358,12 +358,12 @@ define <4 x i32> @knownbits_mask_urem_sh
 
 define <4 x i32> @knownbits_mask_srem_shuffle_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_srem_shuffle_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_srem_shuffle_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 -32768, i32 -1, i32 -1, i32 -32768>
@@ -375,12 +375,12 @@ define <4 x i32> @knownbits_mask_srem_sh
 
 define <4 x i32> @knownbits_mask_bswap_shuffle_shl(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_bswap_shuffle_shl:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_bswap_shuffle_shl:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -393,7 +393,7 @@ declare <4 x i32> @llvm.bswap.v4i32(<4 x
 
 define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_concat_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm1, %xmm1
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
@@ -403,7 +403,7 @@ define <8 x float> @knownbits_mask_conca
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_concat_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
@@ -420,14 +420,14 @@ define <8 x float> @knownbits_mask_conca
 
 define <4 x float> @knownbits_lshr_bitcast_shuffle_uitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_lshr_bitcast_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_lshr_bitcast_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
@@ -441,7 +441,7 @@ define <4 x float> @knownbits_lshr_bitca
 
 define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_smax_smin_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpminsd {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpmaxsd {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -453,7 +453,7 @@ define <4 x float> @knownbits_smax_smin_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_smax_smin_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpminsd {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -474,14 +474,14 @@ declare <4 x i32> @llvm.x86.sse41.pminsd
 
 define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_umin_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpminud {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
 ; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_umin_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
 ; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
@@ -496,13 +496,13 @@ declare <4 x i32> @llvm.x86.sse41.pminud
 
 define <4 x i32> @knownbits_umax_shuffle_ashr(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_umax_shuffle_ashr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_umax_shuffle_ashr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmaxud {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
 ; X64-NEXT:    retq
@@ -514,7 +514,7 @@ define <4 x i32> @knownbits_umax_shuffle
 
 define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_mask_umax_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -522,7 +522,7 @@ define <4 x float> @knownbits_mask_umax_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_umax_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpmaxud {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
@@ -537,12 +537,12 @@ define <4 x float> @knownbits_mask_umax_
 
 define <4 x i32> @knownbits_mask_bitreverse_ashr(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_mask_bitreverse_ashr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_bitreverse_ashr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 -2, i32 -2>
@@ -555,7 +555,7 @@ declare <4 x i32> @llvm.bitreverse.v4i32
 ; If we don't know that the input isn't INT_MIN we can't combine to sitofp
 define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_abs_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpabsd %xmm0, %xmm0
 ; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
 ; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
@@ -565,7 +565,7 @@ define <4 x float> @knownbits_abs_uitofp
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_abs_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpabsd %xmm0, %xmm0
 ; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
 ; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
@@ -582,7 +582,7 @@ define <4 x float> @knownbits_abs_uitofp
 
 define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_or_abs_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpor {{\.LCPI.*}}, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; X32-NEXT:    vpabsd %xmm0, %xmm0
@@ -590,7 +590,7 @@ define <4 x float> @knownbits_or_abs_uit
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_or_abs_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; X64-NEXT:    vpabsd %xmm0, %xmm0
@@ -607,7 +607,7 @@ define <4 x float> @knownbits_or_abs_uit
 
 define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) nounwind {
 ; X32-LABEL: knownbits_and_select_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
@@ -624,7 +624,7 @@ define <4 x float> @knownbits_and_select
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_and_select_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm2, %xmm2
 ; X64-NEXT:    vandps {{.*}}(%rip), %xmm3, %xmm3
 ; X64-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -643,7 +643,7 @@ define <4 x float> @knownbits_and_select
 
 define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) nounwind {
 ; X32-LABEL: knownbits_lshr_and_select_shuffle_uitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
@@ -662,7 +662,7 @@ define <4 x float> @knownbits_lshr_and_s
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_lshr_and_select_shuffle_uitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrld $1, %xmm2, %xmm4
 ; X64-NEXT:    vpsrld $5, %xmm2, %xmm2
 ; X64-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]

Modified: llvm/trunk/test/CodeGen/X86/known-bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-bits.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-bits.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @knownbits_zext_in_reg(i8*) nounwind {
 ; X32-LABEL: knownbits_zext_in_reg:
-; X32:       # BB#0: # %BB
+; X32:       # %bb.0: # %BB
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    pushl %edi
@@ -47,7 +47,7 @@ define void @knownbits_zext_in_reg(i8*)
 ; X32-NEXT:    jmp .LBB0_1
 ;
 ; X64-LABEL: knownbits_zext_in_reg:
-; X64:       # BB#0: # %BB
+; X64:       # %bb.0: # %BB
 ; X64-NEXT:    movzbl (%rdi), %eax
 ; X64-NEXT:    imull $101, %eax, %eax
 ; X64-NEXT:    shrl $14, %eax
@@ -106,12 +106,12 @@ CF246:
 
 define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
 ; X32-LABEL: knownbits_mask_add_lshr:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_add_lshr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    retq
   %1 = and i32 %a0, 32767
@@ -123,7 +123,7 @@ define i32 @knownbits_mask_add_lshr(i32
 
 define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
 ; X32-LABEL: knownbits_mask_addc_shl:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -147,7 +147,7 @@ define i128 @knownbits_mask_addc_shl(i64
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: knownbits_mask_addc_shl:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andq $-1024, %rdi # imm = 0xFC00
 ; X64-NEXT:    andq $-1024, %rsi # imm = 0xFC00
 ; X64-NEXT:    addq %rdi, %rsi
@@ -169,7 +169,7 @@ define i128 @knownbits_mask_addc_shl(i64
 
 define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
 ; X32-LABEL: knownbits_uaddo_saddo:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -193,7 +193,7 @@ define {i32, i1} @knownbits_uaddo_saddo(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_uaddo_saddo:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shlq $32, %rdi
 ; X64-NEXT:    shlq $32, %rsi
 ; X64-NEXT:    addq %rdi, %rsi
@@ -220,7 +220,7 @@ define {i32, i1} @knownbits_uaddo_saddo(
 
 define {i32, i1} @knownbits_usubo_ssubo(i64 %a0, i64 %a1) nounwind {
 ; X32-LABEL: knownbits_usubo_ssubo:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -244,7 +244,7 @@ define {i32, i1} @knownbits_usubo_ssubo(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_usubo_ssubo:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    shlq $32, %rdi
 ; X64-NEXT:    shlq $32, %rsi
 ; X64-NEXT:    cmpq %rsi, %rdi

Modified: llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind {
 ; X32-LABEL: signbits_sext_v2i64_sitofp_v2f64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtdq2pd {{[0-9]+}}(%esp), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_sext_v2i64_sitofp_v2f64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovd %edi, %xmm0
 ; X64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
 ; X64-NEXT:    vcvtdq2pd %xmm0, %xmm0
@@ -24,7 +24,7 @@ define <2 x double> @signbits_sext_v2i64
 
 define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext %a1, i32 %a2, i32 %a3) nounwind {
 ; X32-LABEL: signbits_sext_v4i64_sitofp_v4f32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovd %eax, %xmm0
@@ -46,7 +46,7 @@ define <4 x float> @signbits_sext_v4i64_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_sext_v4i64_sitofp_v4f32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movslq %edi, %rax
 ; X64-NEXT:    movslq %esi, %rsi
 ; X64-NEXT:    movslq %edx, %rdx
@@ -74,7 +74,7 @@ define <4 x float> @signbits_sext_v4i64_
 
 define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
 ; X32-LABEL: signbits_ashr_extract_sitofp_0:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vextractps $1, %xmm0, %eax
 ; X32-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
@@ -84,7 +84,7 @@ define float @signbits_ashr_extract_sito
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_0:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -99,7 +99,7 @@ define float @signbits_ashr_extract_sito
 
 define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
 ; X32-LABEL: signbits_ashr_extract_sitofp_1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $63, %xmm1, %xmm2
@@ -118,7 +118,7 @@ define float @signbits_ashr_extract_sito
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrlq $63, %xmm0, %xmm1
 ; X64-NEXT:    vpsrlq $32, %xmm0, %xmm0
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -136,7 +136,7 @@ define float @signbits_ashr_extract_sito
 
 define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
 ; X32-LABEL: signbits_ashr_shl_extract_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm1, %xmm2
@@ -156,7 +156,7 @@ define float @signbits_ashr_shl_extract_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_shl_extract_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrlq $60, %xmm0, %xmm1
 ; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
@@ -176,7 +176,7 @@ define float @signbits_ashr_shl_extract_
 
 define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwind {
 ; X32-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -195,7 +195,7 @@ define float @signbits_ashr_insert_ashr_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    sarq $30, %rdi
 ; X64-NEXT:    vmovq %rsi, %xmm0
 ; X64-NEXT:    vmovq %rdi, %xmm1
@@ -217,7 +217,7 @@ define float @signbits_ashr_insert_ashr_
 
 define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: signbits_sext_shuffle_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpmovsxdq %xmm0, %xmm1
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X32-NEXT:    vpmovsxdq %xmm0, %xmm0
@@ -230,7 +230,7 @@ define <4 x double> @signbits_sext_shuff
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_sext_shuffle_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpmovsxdq %xmm0, %xmm1
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-NEXT:    vpmovsxdq %xmm0, %xmm0
@@ -249,7 +249,7 @@ define <4 x double> @signbits_sext_shuff
 
 define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpsrad $16, %xmm0, %xmm1
 ; X32-NEXT:    vpsrlq $16, %xmm0, %xmm0
 ; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -259,7 +259,7 @@ define <2 x double> @signbits_ashr_conca
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrad $16, %xmm0, %xmm1
 ; X64-NEXT:    vpsrlq $16, %xmm0, %xmm0
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -278,7 +278,7 @@ define <2 x double> @signbits_ashr_conca
 
 define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 x i64> %a1, i32 %a2) nounwind {
 ; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm2, %xmm3
@@ -306,7 +306,7 @@ define float @signbits_ashr_sext_sextinr
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrlq $60, %xmm0, %xmm2
 ; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -336,7 +336,7 @@ define float @signbits_ashr_sext_sextinr
 
 define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm2, %xmm3
@@ -359,7 +359,7 @@ define float @signbits_ashr_sextvecinreg
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpsrlq $60, %xmm0, %xmm2
 ; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -386,7 +386,7 @@ define float @signbits_ashr_sextvecinreg
 
 define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, <4 x i32> %a3) nounwind {
 ; X32-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
@@ -423,7 +423,7 @@ define <4 x float> @signbits_ashr_sext_s
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; X64-NEXT:    vpsrlq $63, %xmm4, %xmm5
 ; X64-NEXT:    vpsrlq $33, %xmm4, %xmm4

Modified: llvm/trunk/test/CodeGen/X86/lea-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-3.ll Mon Dec  4 09:18:51 2017
@@ -6,25 +6,25 @@
 
 define i64 @test2(i64 %a) {
 ; LNX1-LABEL: test2:
-; LNX1:       # BB#0:
+; LNX1:       # %bb.0:
 ; LNX1-NEXT:    leaq (,%rdi,4), %rax
 ; LNX1-NEXT:    orq %rdi, %rax
 ; LNX1-NEXT:    retq
 ;
 ; LNX2-LABEL: test2:
-; LNX2:       # BB#0:
+; LNX2:       # %bb.0:
 ; LNX2-NEXT:    leaq (,%rdi,4), %rax
 ; LNX2-NEXT:    orq %rdi, %rax
 ; LNX2-NEXT:    retq
 ;
 ; NACL-LABEL: test2:
-; NACL:       # BB#0:
+; NACL:       # %bb.0:
 ; NACL-NEXT:    leaq (,%rdi,4), %rax
 ; NACL-NEXT:    orq %rdi, %rax
 ; NACL-NEXT:    retq
 ;
 ; WIN-LABEL: test2:
-; WIN:       # BB#0:
+; WIN:       # %bb.0:
 ; WIN-NEXT:    leaq (,%rcx,4), %rax
 ; WIN-NEXT:    orq %rcx, %rax
 ; WIN-NEXT:    retq
@@ -35,25 +35,25 @@ define i64 @test2(i64 %a) {
 
 define i32 @test(i32 %a) {
 ; LNX1-LABEL: test:
-; LNX1:       # BB#0:
+; LNX1:       # %bb.0:
 ; LNX1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; LNX1-NEXT:    leal (%rdi,%rdi,2), %eax
 ; LNX1-NEXT:    retq
 ;
 ; LNX2-LABEL: test:
-; LNX2:       # BB#0:
+; LNX2:       # %bb.0:
 ; LNX2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; LNX2-NEXT:    leal (%rdi,%rdi,2), %eax
 ; LNX2-NEXT:    retq
 ;
 ; NACL-LABEL: test:
-; NACL:       # BB#0:
+; NACL:       # %bb.0:
 ; NACL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; NACL-NEXT:    leal (%rdi,%rdi,2), %eax
 ; NACL-NEXT:    retq
 ;
 ; WIN-LABEL: test:
-; WIN:       # BB#0:
+; WIN:       # %bb.0:
 ; WIN-NEXT:    # kill: %ecx<def> %ecx<kill> %rcx<def>
 ; WIN-NEXT:    leal (%rcx,%rcx,2), %eax
 ; WIN-NEXT:    retq
@@ -63,22 +63,22 @@ define i32 @test(i32 %a) {
 
 define i64 @test3(i64 %a) {
 ; LNX1-LABEL: test3:
-; LNX1:       # BB#0:
+; LNX1:       # %bb.0:
 ; LNX1-NEXT:    leaq (,%rdi,8), %rax
 ; LNX1-NEXT:    retq
 ;
 ; LNX2-LABEL: test3:
-; LNX2:       # BB#0:
+; LNX2:       # %bb.0:
 ; LNX2-NEXT:    leaq (,%rdi,8), %rax
 ; LNX2-NEXT:    retq
 ;
 ; NACL-LABEL: test3:
-; NACL:       # BB#0:
+; NACL:       # %bb.0:
 ; NACL-NEXT:    leaq (,%rdi,8), %rax
 ; NACL-NEXT:    retq
 ;
 ; WIN-LABEL: test3:
-; WIN:       # BB#0:
+; WIN:       # %bb.0:
 ; WIN-NEXT:    leaq (,%rcx,8), %rax
 ; WIN-NEXT:    retq
   %tmp2 = shl i64 %a, 3

Modified: llvm/trunk/test/CodeGen/X86/lea-opt-cse1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-opt-cse1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-opt-cse1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-opt-cse1.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr {
 ; X64-LABEL: test_func:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    movl 16(%rdi), %ecx
 ; X64-NEXT:    leal (%rax,%rcx), %edx
@@ -17,7 +17,7 @@ define void @test_func(%struct.SA* nocap
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_func:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8

Modified: llvm/trunk/test/CodeGen/X86/lea-opt-cse2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-opt-cse2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-opt-cse2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-opt-cse2.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X64-LABEL: foo:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    .p2align 4, 0x90
 ; X64-NEXT:  .LBB0_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -16,14 +16,14 @@ define void @foo(%struct.SA* nocapture %
 ; X64-NEXT:    movl %edx, 12(%rdi)
 ; X64-NEXT:    decl %esi
 ; X64-NEXT:    jne .LBB0_1
-; X64-NEXT:  # BB#2: # %exit
+; X64-NEXT:  # %bb.2: # %exit
 ; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    leal 1(%rcx,%rax), %eax
 ; X64-NEXT:    movl %eax, 16(%rdi)
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    pushl %esi
@@ -41,7 +41,7 @@ define void @foo(%struct.SA* nocapture %
 ; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    decl %ecx
 ; X86-NEXT:    jne .LBB0_1
-; X86-NEXT:  # BB#2: # %exit
+; X86-NEXT:  # %bb.2: # %exit
 ; X86-NEXT:    addl %esi, %edx
 ; X86-NEXT:    leal 1(%esi,%edx), %ecx
 ; X86-NEXT:    movl %ecx, 16(%eax)

Modified: llvm/trunk/test/CodeGen/X86/lea-opt-cse3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-opt-cse3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-opt-cse3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-opt-cse3.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
 ; X64-LABEL: foo:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    leal 4(%rdi,%rsi,2), %ecx
@@ -13,7 +13,7 @@ define i32 @foo(i32 %a, i32 %b) local_un
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    leal 4(%ecx,%eax,2), %edx
@@ -32,7 +32,7 @@ entry:
 
 define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
 ; X64-LABEL: foo1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    leal 4(%rdi,%rsi,4), %ecx
@@ -41,7 +41,7 @@ define i32 @foo1(i32 %a, i32 %b) local_u
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo1:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    leal 4(%ecx,%eax,4), %edx
@@ -60,14 +60,14 @@ entry:
 
 define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
 ; X64-LABEL: foo1_mult_basic_blocks:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    leal 4(%rdi,%rsi,4), %ecx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpl $10, %ecx
 ; X64-NEXT:    je .LBB2_2
-; X64-NEXT:  # BB#1: # %mid
+; X64-NEXT:  # %bb.1: # %mid
 ; X64-NEXT:    leal 4(%rdi,%rsi,8), %eax
 ; X64-NEXT:    imull %eax, %ecx
 ; X64-NEXT:    movl %ecx, %eax
@@ -75,7 +75,7 @@ define i32 @foo1_mult_basic_blocks(i32 %
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo1_mult_basic_blocks:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -85,7 +85,7 @@ define i32 @foo1_mult_basic_blocks(i32 %
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl $10, %ecx
 ; X86-NEXT:    je .LBB2_2
-; X86-NEXT:  # BB#1: # %mid
+; X86-NEXT:  # %bb.1: # %mid
 ; X86-NEXT:    leal 4(%esi,%edx,8), %eax
 ; X86-NEXT:    imull %eax, %ecx
 ; X86-NEXT:    movl %ecx, %eax
@@ -112,14 +112,14 @@ exit:
 
 define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_addr #0 {
 ; X64-LABEL: foo1_mult_basic_blocks_illegal_scale:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    leal 4(%rdi,%rsi,2), %ecx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpl $10, %ecx
 ; X64-NEXT:    je .LBB3_2
-; X64-NEXT:  # BB#1: # %mid
+; X64-NEXT:  # %bb.1: # %mid
 ; X64-NEXT:    leal 4(%rdi,%rsi,8), %eax
 ; X64-NEXT:    imull %eax, %ecx
 ; X64-NEXT:    movl %ecx, %eax
@@ -127,7 +127,7 @@ define i32 @foo1_mult_basic_blocks_illeg
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo1_mult_basic_blocks_illegal_scale:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -137,7 +137,7 @@ define i32 @foo1_mult_basic_blocks_illeg
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl $10, %ecx
 ; X86-NEXT:    je .LBB3_2
-; X86-NEXT:  # BB#1: # %mid
+; X86-NEXT:  # %bb.1: # %mid
 ; X86-NEXT:    leal 4(%esi,%edx,8), %eax
 ; X86-NEXT:    imull %eax, %ecx
 ; X86-NEXT:    movl %ecx, %eax

Modified: llvm/trunk/test/CodeGen/X86/lea-opt-cse4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-opt-cse4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-opt-cse4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea-opt-cse4.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X64-LABEL: foo:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl 16(%rdi), %eax
 ; X64-NEXT:    movl (%rdi), %ecx
 ; X64-NEXT:    addl %eax, %ecx
@@ -20,7 +20,7 @@ define void @foo(%struct.SA* nocapture %
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
@@ -58,7 +58,7 @@ define void @foo(%struct.SA* nocapture %
 
 define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X64-LABEL: foo_loop:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    .p2align 4, 0x90
 ; X64-NEXT:  .LBB1_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -68,7 +68,7 @@ define void @foo_loop(%struct.SA* nocapt
 ; X64-NEXT:    movl %edx, 12(%rdi)
 ; X64-NEXT:    decl %esi
 ; X64-NEXT:    jne .LBB1_1
-; X64-NEXT:  # BB#2: # %exit
+; X64-NEXT:  # %bb.2: # %exit
 ; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    leal 1(%rax,%rcx), %ecx
 ; X64-NEXT:    addl %eax, %ecx
@@ -81,7 +81,7 @@ define void @foo_loop(%struct.SA* nocapt
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo_loop:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    pushl %esi
@@ -99,7 +99,7 @@ define void @foo_loop(%struct.SA* nocapt
 ; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    decl %edx
 ; X86-NEXT:    jne .LBB1_1
-; X86-NEXT:  # BB#2: # %exit
+; X86-NEXT:  # %bb.2: # %exit
 ; X86-NEXT:    addl %ecx, %esi
 ; X86-NEXT:    leal 1(%ecx,%esi), %edx
 ; X86-NEXT:    addl %ecx, %edx

Modified: llvm/trunk/test/CodeGen/X86/lea32-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea32-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea32-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea32-schedule.ll Mon Dec  4 09:18:51 2017
@@ -13,13 +13,13 @@
 
 define i32 @test_lea_offset(i32) {
 ; GENERIC-LABEL: test_lea_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal -24(%rdi), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal -24(%rdi), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -31,43 +31,43 @@ define i32 @test_lea_offset(i32) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal -24(%rdi), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal -24(%rdi), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal -24(%rdi), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal -24(%rdi), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal -24(%rdi), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal -24(%rdi), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal -24(%rdi), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -77,13 +77,13 @@ define i32 @test_lea_offset(i32) {
 
 define i32 @test_lea_offset_big(i32) {
 ; GENERIC-LABEL: test_lea_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal 1024(%rdi), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -95,43 +95,43 @@ define i32 @test_lea_offset_big(i32) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal 1024(%rdi), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -142,14 +142,14 @@ define i32 @test_lea_offset_big(i32) {
 ; Function Attrs: norecurse nounwind readnone uwtable
 define i32 @test_lea_add(i32, i32) {
 ; GENERIC-LABEL: test_lea_add:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:1.00]
@@ -162,49 +162,49 @@ define i32 @test_lea_add(i32, i32) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.25]
@@ -215,7 +215,7 @@ define i32 @test_lea_add(i32, i32) {
 
 define i32 @test_lea_add_offset(i32, i32) {
 ; GENERIC-LABEL: test_lea_add_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -223,7 +223,7 @@ define i32 @test_lea_add_offset(i32, i32
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
@@ -236,14 +236,14 @@ define i32 @test_lea_add_offset(i32, i32
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -251,7 +251,7 @@ define i32 @test_lea_add_offset(i32, i32
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -259,7 +259,7 @@ define i32 @test_lea_add_offset(i32, i32
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -267,7 +267,7 @@ define i32 @test_lea_add_offset(i32, i32
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -275,14 +275,14 @@ define i32 @test_lea_add_offset(i32, i32
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
@@ -294,7 +294,7 @@ define i32 @test_lea_add_offset(i32, i32
 
 define i32 @test_lea_add_offset_big(i32, i32) {
 ; GENERIC-LABEL: test_lea_add_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -303,7 +303,7 @@ define i32 @test_lea_add_offset_big(i32,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
@@ -316,14 +316,14 @@ define i32 @test_lea_add_offset_big(i32,
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -332,7 +332,7 @@ define i32 @test_lea_add_offset_big(i32,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -341,7 +341,7 @@ define i32 @test_lea_add_offset_big(i32,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -350,7 +350,7 @@ define i32 @test_lea_add_offset_big(i32,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.50]
@@ -359,14 +359,14 @@ define i32 @test_lea_add_offset_big(i32,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
@@ -378,13 +378,13 @@ define i32 @test_lea_add_offset_big(i32,
 
 define i32 @test_lea_mul(i32) {
 ; GENERIC-LABEL: test_lea_mul:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_mul:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -396,43 +396,43 @@ define i32 @test_lea_mul(i32) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_mul:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_mul:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_mul:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_mul:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_mul:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_mul:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_mul:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -442,14 +442,14 @@ define i32 @test_lea_mul(i32) {
 
 define i32 @test_lea_mul_offset(i32) {
 ; GENERIC-LABEL: test_lea_mul_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    addl $-32, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_mul_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -461,47 +461,47 @@ define i32 @test_lea_mul_offset(i32) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_mul_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_mul_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    addl $-32, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_mul_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    addl $-32, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_mul_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addl $-32, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_mul_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addl $-32, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_mul_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_mul_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -512,7 +512,7 @@ define i32 @test_lea_mul_offset(i32) {
 
 define i32 @test_lea_mul_offset_big(i32) {
 ; GENERIC-LABEL: test_lea_mul_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    addl $10000, %eax # imm = 0x2710
@@ -520,7 +520,7 @@ define i32 @test_lea_mul_offset_big(i32)
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_mul_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -532,13 +532,13 @@ define i32 @test_lea_mul_offset_big(i32)
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_mul_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_mul_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    addl $10000, %eax # imm = 0x2710
@@ -546,7 +546,7 @@ define i32 @test_lea_mul_offset_big(i32)
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_mul_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    addl $10000, %eax # imm = 0x2710
@@ -554,7 +554,7 @@ define i32 @test_lea_mul_offset_big(i32)
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_mul_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addl $10000, %eax # imm = 0x2710
@@ -562,7 +562,7 @@ define i32 @test_lea_mul_offset_big(i32)
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_mul_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addl $10000, %eax # imm = 0x2710
@@ -570,13 +570,13 @@ define i32 @test_lea_mul_offset_big(i32)
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_mul_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_mul_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -587,14 +587,14 @@ define i32 @test_lea_mul_offset_big(i32)
 
 define i32 @test_lea_add_scale(i32, i32) {
 ; GENERIC-LABEL: test_lea_add_scale:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_scale:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
@@ -607,49 +607,49 @@ define i32 @test_lea_add_scale(i32, i32)
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_scale:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_scale:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_scale:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_scale:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_scale:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_scale:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_scale:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
@@ -661,7 +661,7 @@ define i32 @test_lea_add_scale(i32, i32)
 
 define i32 @test_lea_add_scale_offset(i32, i32) {
 ; GENERIC-LABEL: test_lea_add_scale_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -669,7 +669,7 @@ define i32 @test_lea_add_scale_offset(i3
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_scale_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
@@ -682,14 +682,14 @@ define i32 @test_lea_add_scale_offset(i3
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_scale_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_scale_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -697,7 +697,7 @@ define i32 @test_lea_add_scale_offset(i3
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_scale_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -705,7 +705,7 @@ define i32 @test_lea_add_scale_offset(i3
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_scale_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -713,7 +713,7 @@ define i32 @test_lea_add_scale_offset(i3
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_scale_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
@@ -721,14 +721,14 @@ define i32 @test_lea_add_scale_offset(i3
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_scale_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_scale_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
@@ -741,7 +741,7 @@ define i32 @test_lea_add_scale_offset(i3
 
 define i32 @test_lea_add_scale_offset_big(i32, i32) {
 ; GENERIC-LABEL: test_lea_add_scale_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; GENERIC-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; GENERIC-NEXT:    leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -750,7 +750,7 @@ define i32 @test_lea_add_scale_offset_bi
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_scale_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ATOM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ATOM-NEXT:    leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
@@ -763,14 +763,14 @@ define i32 @test_lea_add_scale_offset_bi
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_scale_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SLM-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SLM-NEXT:    leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_scale_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SANDY-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SANDY-NEXT:    leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -779,7 +779,7 @@ define i32 @test_lea_add_scale_offset_bi
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_scale_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; HASWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; HASWELL-NEXT:    leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -788,7 +788,7 @@ define i32 @test_lea_add_scale_offset_bi
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_scale_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BROADWELL-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BROADWELL-NEXT:    leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -797,7 +797,7 @@ define i32 @test_lea_add_scale_offset_bi
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; SKYLAKE-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; SKYLAKE-NEXT:    leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
@@ -806,14 +806,14 @@ define i32 @test_lea_add_scale_offset_bi
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_scale_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; BTVER2-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; BTVER2-NEXT:    leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_scale_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; ZNVER1-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; ZNVER1-NEXT:    leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]

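The change in these scheduling tests is purely mechanical: every "# BB#N" basic-block comment emitted by the AsmPrinter now reads "# %bb.N", matching the MIR block syntax. As a rough illustration only, here is a minimal Python sketch of that kind of textual migration over test files; the helper and the regex are illustrative assumptions, not the tooling actually used for this commit.

    import re
    import sys

    # Rewrite old-style basic-block comments ("# BB#0") to the unified
    # MIR-style form ("# %bb.0"). Illustrative sketch only; the exact
    # substitution used upstream may differ.
    BB_REF = re.compile(r'# BB#(\d+)')

    def migrate(path):
        with open(path) as f:
            text = f.read()
        new_text = BB_REF.sub(r'# %bb.\1', text)
        if new_text != text:
            with open(path, 'w') as f:
                f.write(new_text)

    for path in sys.argv[1:]:
        migrate(path)

Run as "python migrate.py test/CodeGen/X86/*.ll"; only files that actually contain the old form are rewritten.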
Modified: llvm/trunk/test/CodeGen/X86/lea64-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea64-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea64-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea64-schedule.ll Mon Dec  4 09:18:51 2017
@@ -13,12 +13,12 @@
 
 define i64 @test_lea_offset(i64) {
 ; GENERIC-LABEL: test_lea_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq -24(%rdi), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -29,37 +29,37 @@ define i64 @test_lea_offset(i64) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq -24(%rdi), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = add nsw i64 %0, -24
@@ -68,12 +68,12 @@ define i64 @test_lea_offset(i64) {
 
 define i64 @test_lea_offset_big(i64) {
 ; GENERIC-LABEL: test_lea_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq 1024(%rdi), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -84,37 +84,37 @@ define i64 @test_lea_offset_big(i64) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq 1024(%rdi), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = add nsw i64 %0, 1024
@@ -124,12 +124,12 @@ define i64 @test_lea_offset_big(i64) {
 ; Function Attrs: norecurse nounwind readnone uwtable
 define i64 @test_lea_add(i64, i64) {
 ; GENERIC-LABEL: test_lea_add:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -140,37 +140,37 @@ define i64 @test_lea_add(i64, i64) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add nsw i64 %1, %0
@@ -179,13 +179,13 @@ define i64 @test_lea_add(i64, i64) {
 
 define i64 @test_lea_add_offset(i64, i64) {
 ; GENERIC-LABEL: test_lea_add_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    addq $16, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq 16(%rdi,%rsi), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -196,41 +196,41 @@ define i64 @test_lea_add_offset(i64, i64
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq 16(%rdi,%rsi), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    addq $16, %rax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    addq $16, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addq $16, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addq $16, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq 16(%rdi,%rsi), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq 16(%rdi,%rsi), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add i64 %0, 16
@@ -240,14 +240,14 @@ define i64 @test_lea_add_offset(i64, i64
 
 define i64 @test_lea_add_offset_big(i64, i64) {
 ; GENERIC-LABEL: test_lea_add_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    addq $-4096, %rax # imm = 0xF000
 ; GENERIC-NEXT:    # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq -4096(%rdi,%rsi), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -258,45 +258,45 @@ define i64 @test_lea_add_offset_big(i64,
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq -4096(%rdi,%rsi), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    addq $-4096, %rax # imm = 0xF000
 ; SANDY-NEXT:    # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    addq $-4096, %rax # imm = 0xF000
 ; HASWELL-NEXT:    # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addq $-4096, %rax # imm = 0xF000
 ; BROADWELL-NEXT:    # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addq $-4096, %rax # imm = 0xF000
 ; SKYLAKE-NEXT:    # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq -4096(%rdi,%rsi), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add i64 %0, -4096
@@ -306,12 +306,12 @@ define i64 @test_lea_add_offset_big(i64,
 
 define i64 @test_lea_mul(i64) {
 ; GENERIC-LABEL: test_lea_mul:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_mul:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -322,37 +322,37 @@ define i64 @test_lea_mul(i64) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_mul:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_mul:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_mul:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_mul:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_mul:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_mul:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_mul:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i64 %0, 3
@@ -361,13 +361,13 @@ define i64 @test_lea_mul(i64) {
 
 define i64 @test_lea_mul_offset(i64) {
 ; GENERIC-LABEL: test_lea_mul_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    addq $-32, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_mul_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq -32(%rdi,%rdi,2), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -378,41 +378,41 @@ define i64 @test_lea_mul_offset(i64) {
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_mul_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq -32(%rdi,%rdi,2), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_mul_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    addq $-32, %rax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_mul_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    addq $-32, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_mul_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addq $-32, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_mul_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addq $-32, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_mul_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_mul_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i64 %0, 3
@@ -422,14 +422,14 @@ define i64 @test_lea_mul_offset(i64) {
 
 define i64 @test_lea_mul_offset_big(i64) {
 ; GENERIC-LABEL: test_lea_mul_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    addq $10000, %rax # imm = 0x2710
 ; GENERIC-NEXT:    # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_mul_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq 10000(%rdi,%rdi,8), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -440,45 +440,45 @@ define i64 @test_lea_mul_offset_big(i64)
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_mul_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq 10000(%rdi,%rdi,8), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_mul_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    addq $10000, %rax # imm = 0x2710
 ; SANDY-NEXT:    # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_mul_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    addq $10000, %rax # imm = 0x2710
 ; HASWELL-NEXT:    # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_mul_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addq $10000, %rax # imm = 0x2710
 ; BROADWELL-NEXT:    # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_mul_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addq $10000, %rax # imm = 0x2710
 ; SKYLAKE-NEXT:    # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_mul_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_mul_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i64 %0, 9
@@ -488,12 +488,12 @@ define i64 @test_lea_mul_offset_big(i64)
 
 define i64 @test_lea_add_scale(i64, i64) {
 ; GENERIC-LABEL: test_lea_add_scale:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_scale:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -504,37 +504,37 @@ define i64 @test_lea_add_scale(i64, i64)
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_scale:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_scale:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_scale:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_scale:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_scale:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_scale:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_scale:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i64 %1, 1
@@ -544,13 +544,13 @@ define i64 @test_lea_add_scale(i64, i64)
 
 define i64 @test_lea_add_scale_offset(i64, i64) {
 ; GENERIC-LABEL: test_lea_add_scale_offset:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    addq $96, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_scale_offset:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq 96(%rdi,%rsi,4), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -561,41 +561,41 @@ define i64 @test_lea_add_scale_offset(i6
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_scale_offset:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq 96(%rdi,%rsi,4), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_scale_offset:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    addq $96, %rax # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_scale_offset:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    addq $96, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_scale_offset:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addq $96, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_scale_offset:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addq $96, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_scale_offset:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_scale_offset:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i64 %1, 2
@@ -606,14 +606,14 @@ define i64 @test_lea_add_scale_offset(i6
 
 define i64 @test_lea_add_scale_offset_big(i64, i64) {
 ; GENERIC-LABEL: test_lea_add_scale_offset_big:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
 ; GENERIC-NEXT:    addq $-1200, %rax # imm = 0xFB50
 ; GENERIC-NEXT:    # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; ATOM-LABEL: test_lea_add_scale_offset_big:
-; ATOM:       # BB#0:
+; ATOM:       # %bb.0:
 ; ATOM-NEXT:    leaq -1200(%rdi,%rsi,8), %rax # sched: [1:1.00]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
 ; ATOM-NEXT:    nop # sched: [1:0.50]
@@ -624,45 +624,45 @@ define i64 @test_lea_add_scale_offset_bi
 ; ATOM-NEXT:    retq # sched: [79:39.50]
 ;
 ; SLM-LABEL: test_lea_add_scale_offset_big:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    leaq -1200(%rdi,%rsi,8), %rax # sched: [1:1.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_lea_add_scale_offset_big:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
 ; SANDY-NEXT:    addq $-1200, %rax # imm = 0xFB50
 ; SANDY-NEXT:    # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lea_add_scale_offset_big:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
 ; HASWELL-NEXT:    addq $-1200, %rax # imm = 0xFB50
 ; HASWELL-NEXT:    # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lea_add_scale_offset_big:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
 ; BROADWELL-NEXT:    addq $-1200, %rax # imm = 0xFB50
 ; BROADWELL-NEXT:    # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
 ; SKYLAKE-NEXT:    addq $-1200, %rax # imm = 0xFB50
 ; SKYLAKE-NEXT:    # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lea_add_scale_offset_big:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lea_add_scale_offset_big:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i64 %1, 3

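Each scheduling model in these files has its own FileCheck prefix (GENERIC, ATOM, SLM, SANDY, HASWELL, BROADWELL, SKYLAKE, BTVER2, ZNVER1), so a single .ll file carries one block of expectations per subtarget. As a toy illustration of how a prefix selects its directives, the following sketch models only the "; PREFIX:" and "; PREFIX-NEXT:" forms; it is an assumption-laden stand-in, not a substitute for FileCheck itself.

    import re

    def collect_directives(test_text, prefix):
        # Gather the '; PREFIX:' and '; PREFIX-NEXT:' patterns for one
        # check prefix, in file order.
        pat = re.compile(r';\s*' + re.escape(prefix) + r'(-NEXT)?:\s*(.*)')
        directives = []
        for line in test_text.splitlines():
            m = pat.match(line.strip())
            if m:
                directives.append((m.group(1) is not None, m.group(2)))
        return directives

    def check(asm_lines, directives):
        # Very rough sequential matcher: a plain directive may match any
        # later line; a -NEXT directive must match the immediately
        # following one. Real FileCheck also handles regexes, variables,
        # LABEL anchoring, and much more.
        i = 0
        for is_next, expected in directives:
            if is_next:
                if i >= len(asm_lines) or expected not in asm_lines[i]:
                    return False
                i += 1
            else:
                while i < len(asm_lines) and expected not in asm_lines[i]:
                    i += 1
                if i == len(asm_lines):
                    return False
                i += 1
        return True

With this toy model, updating the block prefix in the emitted assembly without updating the "; PREFIX:" lines would make check() return False, which is why the rename touches every autogenerated test in one sweep.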
Modified: llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i64 @test1(i32 %xx, i32 %test) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    andb $7, %cl
@@ -22,7 +22,7 @@ define i64 @test1(i32 %xx, i32 %test) no
 
 define i64 @test2(i64 %xx, i32 %test) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -41,7 +41,7 @@ define i64 @test2(i64 %xx, i32 %test) no
 
 define i64 @test3(i64 %xx, i32 %test) nounwind {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %cl
@@ -57,7 +57,7 @@ define i64 @test3(i64 %xx, i32 %test) no
 
 define i64 @test4(i64 %xx, i32 %test) nounwind {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %cl
@@ -74,7 +74,7 @@ define i64 @test4(i64 %xx, i32 %test) no
 ; PR14668
 define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %ebp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    pushl %ebx
@@ -97,7 +97,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2
 ; CHECK-NEXT:    testb $32, %cl
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ebp
 ; CHECK-NEXT:    je .LBB4_2
-; CHECK-NEXT:  # BB#1:
+; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movl %edi, %esi
 ; CHECK-NEXT:    xorl %edi, %edi
 ; CHECK-NEXT:  .LBB4_2:
@@ -108,7 +108,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2
 ; CHECK-NEXT:    shldl %cl, %edx, %ebp
 ; CHECK-NEXT:    testb $32, %cl
 ; CHECK-NEXT:    je .LBB4_4
-; CHECK-NEXT:  # BB#3:
+; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    movl %ebx, %ebp
 ; CHECK-NEXT:    xorl %ebx, %ebx
 ; CHECK-NEXT:  .LBB4_4:
@@ -128,7 +128,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2
 ; PR16108
 define i32 @test6() {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %ebp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    .cfi_offset %ebp, -8
@@ -144,7 +144,7 @@ define i32 @test6() {
 ; CHECK-NEXT:    movb $32, %dl
 ; CHECK-NEXT:    testb %dl, %dl
 ; CHECK-NEXT:    jne .LBB5_2
-; CHECK-NEXT:  # BB#1:
+; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movl %ecx, %eax
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    sete %cl
@@ -152,7 +152,7 @@ define i32 @test6() {
 ; CHECK-NEXT:    xorl $1, %eax
 ; CHECK-NEXT:    orl %ecx, %eax
 ; CHECK-NEXT:    je .LBB5_5
-; CHECK-NEXT:  # BB#3: # %if.then
+; CHECK-NEXT:  # %bb.3: # %if.then
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    jmp .LBB5_4
 ; CHECK-NEXT:  .LBB5_5: # %if.end

Modified: llvm/trunk/test/CodeGen/X86/legalize-shl-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/legalize-shl-vec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/legalize-shl-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/legalize-shl-vec.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <2 x i256> @test_shl(<2 x i256> %In) {
 ; X32-LABEL: test_shl:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $0, 60(%eax)
 ; X32-NEXT:    movl $0, 56(%eax)
@@ -25,7 +25,7 @@ define <2 x i256> @test_shl(<2 x i256> %
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_shl:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movaps %xmm0, 48(%rdi)
 ; X64-NEXT:    movaps %xmm0, 32(%rdi)
@@ -40,7 +40,7 @@ define <2 x i256> @test_shl(<2 x i256> %
 
 define <2 x i256> @test_srl(<2 x i256> %In) {
 ; X32-LABEL: test_srl:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $0, 60(%eax)
 ; X32-NEXT:    movl $0, 56(%eax)
@@ -61,7 +61,7 @@ define <2 x i256> @test_srl(<2 x i256> %
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_srl:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movaps %xmm0, 48(%rdi)
 ; X64-NEXT:    movaps %xmm0, 32(%rdi)
@@ -76,7 +76,7 @@ define <2 x i256> @test_srl(<2 x i256> %
 
 define <2 x i256> @test_sra(<2 x i256> %In) {
 ; X32-LABEL: test_sra:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl %ecx, 60(%eax)
@@ -107,7 +107,7 @@ define <2 x i256> @test_sra(<2 x i256> %
 ; X32-NEXT:    retl $4
 ;
 ; X64-LABEL: test_sra:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
 ; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdx

Modified: llvm/trunk/test/CodeGen/X86/live-out-reg-info.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/live-out-reg-info.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/live-out-reg-info.ll (original)
+++ llvm/trunk/test/CodeGen/X86/live-out-reg-info.ll Mon Dec  4 09:18:51 2017
@@ -8,13 +8,13 @@ declare void @qux()
 
 define void @foo(i32 %a) {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    shrl $23, %edi
 ; CHECK-NEXT:    btl $8, %edi
 ; CHECK-NEXT:    jb .LBB0_2
-; CHECK-NEXT:  # BB#1: # %true
+; CHECK-NEXT:  # %bb.1: # %true
 ; CHECK-NEXT:    callq qux
 ; CHECK-NEXT:  .LBB0_2: # %false
 ; CHECK-NEXT:    popq %rax

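CHECK blocks like the ones above are produced by llvm/utils/update_llc_test_checks.py, which is what makes a wholesale rename like this practical. Below is a hedged sketch of re-running the updater over a set of tests; it assumes llc is on PATH, that the script lives at the usual in-tree path, and that its default flags (which vary across releases) are acceptable for these files.

    import pathlib
    import subprocess

    # Regenerate FileCheck lines for autogenerated .ll tests under the
    # X86 CodeGen test directory. Path and glob are assumptions; adjust
    # for your checkout.
    UPDATER = 'llvm/utils/update_llc_test_checks.py'

    for test in pathlib.Path('llvm/test/CodeGen/X86').glob('*-schedule.ll'):
        subprocess.run(['python', UPDATER, str(test)], check=True)

After regeneration, a plain "grep -rn 'BB#' llvm/test/CodeGen" is a quick way to confirm no stale old-style block references remain.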
Modified: llvm/trunk/test/CodeGen/X86/load-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/load-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/load-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/load-combine.ll Mon Dec  4 09:18:51 2017
@@ -8,13 +8,13 @@
 ; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
 define i32 @load_i32_by_i8(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl (%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -42,26 +42,26 @@ define i32 @load_i32_by_i8(i32* %arg) {
 ; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
 define i32 @load_i32_by_i8_bswap(i32* %arg) {
 ; BSWAP-LABEL: load_i32_by_i8_bswap:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl (%eax), %eax
 ; BSWAP-NEXT:    bswapl %eax
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i32_by_i8_bswap:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MOVBE-NEXT:    movbel (%eax), %eax
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i32_by_i8_bswap:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movl (%rdi), %eax
 ; BSWAP64-NEXT:    bswapl %eax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i32_by_i8_bswap:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movbel (%rdi), %eax
 ; MOVBE64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -89,13 +89,13 @@ define i32 @load_i32_by_i8_bswap(i32* %a
 ; (i32) p[0] | ((i32) p[1] << 16)
 define i32 @load_i32_by_i16(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i16:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl (%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i16*
@@ -114,13 +114,13 @@ define i32 @load_i32_by_i16(i32* %arg) {
 ; (i32) p_16[0] | ((i32) p[2] << 16) | ((i32) p[3] << 24)
 define i32 @load_i32_by_i16_i8(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i16_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i16_i8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl (%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i16*
@@ -145,13 +145,13 @@ define i32 @load_i32_by_i16_i8(i32* %arg
 ; (i32) ((i16) p[0] | ((i16) p[1] << 8)) | (((i32) ((i16) p[3] | ((i16) p[4] << 8)) << 16)
 define i32 @load_i32_by_i16_by_i8(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i16_by_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i16_by_i8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl (%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -181,26 +181,26 @@ define i32 @load_i32_by_i16_by_i8(i32* %
 ; ((i32) (((i16) p[0] << 8) | (i16) p[1]) << 16) | (i32) (((i16) p[3] << 8) | (i16) p[4])
 define i32 @load_i32_by_i16_by_i8_bswap(i32* %arg) {
 ; BSWAP-LABEL: load_i32_by_i16_by_i8_bswap:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl (%eax), %eax
 ; BSWAP-NEXT:    bswapl %eax
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i32_by_i16_by_i8_bswap:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MOVBE-NEXT:    movbel (%eax), %eax
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i32_by_i16_by_i8_bswap:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movl (%rdi), %eax
 ; BSWAP64-NEXT:    bswapl %eax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i32_by_i16_by_i8_bswap:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movbel (%rdi), %eax
 ; MOVBE64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -230,14 +230,14 @@ define i32 @load_i32_by_i16_by_i8_bswap(
 ; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
 define i64 @load_i64_by_i8(i64* %arg) {
 ; CHECK-LABEL: load_i64_by_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl (%ecx), %eax
 ; CHECK-NEXT:    movl 4(%ecx), %edx
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i64_by_i8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movq (%rdi), %rax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i64* %arg to i8*
@@ -285,7 +285,7 @@ define i64 @load_i64_by_i8(i64* %arg) {
 ; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
 define i64 @load_i64_by_i8_bswap(i64* %arg) {
 ; BSWAP-LABEL: load_i64_by_i8_bswap:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl (%eax), %edx
 ; BSWAP-NEXT:    movl 4(%eax), %eax
@@ -294,20 +294,20 @@ define i64 @load_i64_by_i8_bswap(i64* %a
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i64_by_i8_bswap:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; MOVBE-NEXT:    movbel 4(%ecx), %eax
 ; MOVBE-NEXT:    movbel (%ecx), %edx
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i64_by_i8_bswap:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movq (%rdi), %rax
 ; BSWAP64-NEXT:    bswapq %rax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i64_by_i8_bswap:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movbeq (%rdi), %rax
 ; MOVBE64-NEXT:    retq
   %tmp = bitcast i64* %arg to i8*
@@ -358,7 +358,7 @@ define i64 @load_i64_by_i8_bswap(i64* %a
 ; x | res
 define i32 @load_i32_by_i8_bswap_uses(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i8_bswap_uses:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    .cfi_offset %esi, -8
@@ -379,7 +379,7 @@ define i32 @load_i32_by_i8_bswap_uses(i3
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_bswap_uses:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $24, %eax
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
@@ -422,7 +422,7 @@ define i32 @load_i32_by_i8_bswap_uses(i3
 ; ((i32) p0 << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
 define i32 @load_i32_by_i8_bswap_volatile(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i8_bswap_volatile:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl (%eax), %ecx
 ; CHECK-NEXT:    shll $24, %ecx
@@ -437,7 +437,7 @@ define i32 @load_i32_by_i8_bswap_volatil
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_bswap_volatile:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $24, %eax
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
@@ -478,7 +478,7 @@ define i32 @load_i32_by_i8_bswap_volatil
 ; res1 | res2
 define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_bswap_store_in_between:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    .cfi_offset %esi, -8
@@ -499,7 +499,7 @@ define i32 @load_i32_by_i8_bswap_store_i
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $24, %eax
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
@@ -540,7 +540,7 @@ define i32 @load_i32_by_i8_bswap_store_i
 ; ((i32) p[0] << 24) | ((i32) q[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
 define i32 @load_i32_by_i8_bswap_unrelated_load(i32* %arg, i32* %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_bswap_unrelated_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movzbl (%ecx), %edx
@@ -556,7 +556,7 @@ define i32 @load_i32_by_i8_bswap_unrelat
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_bswap_unrelated_load:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $24, %eax
 ; CHECK64-NEXT:    movzbl 1(%rsi), %ecx
@@ -595,13 +595,13 @@ define i32 @load_i32_by_i8_bswap_unrelat
 ; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
 define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl 1(%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_nonzero_offset:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl 1(%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -630,13 +630,13 @@ define i32 @load_i32_by_i8_nonzero_offse
 ; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
 define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_i8_neg_offset:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl -4(%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_neg_offset:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl -4(%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -665,26 +665,26 @@ define i32 @load_i32_by_i8_neg_offset(i3
 ; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
 define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
 ; BSWAP-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl 1(%eax), %eax
 ; BSWAP-NEXT:    bswapl %eax
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MOVBE-NEXT:    movbel 1(%eax), %eax
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movl 1(%rdi), %eax
 ; BSWAP64-NEXT:    bswapl %eax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i32_by_i8_nonzero_offset_bswap:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movbel 1(%rdi), %eax
 ; MOVBE64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -713,26 +713,26 @@ define i32 @load_i32_by_i8_nonzero_offse
 ; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
 define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
 ; BSWAP-LABEL: load_i32_by_i8_neg_offset_bswap:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl -4(%eax), %eax
 ; BSWAP-NEXT:    bswapl %eax
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i32_by_i8_neg_offset_bswap:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MOVBE-NEXT:    movbel -4(%eax), %eax
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i32_by_i8_neg_offset_bswap:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movl -4(%rdi), %eax
 ; BSWAP64-NEXT:    bswapl %eax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i32_by_i8_neg_offset_bswap:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movbel -4(%rdi), %eax
 ; MOVBE64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
@@ -761,7 +761,7 @@ define i32 @load_i32_by_i8_neg_offset_bs
 ; ((i32) p[i] << 24) | ((i32) p[i + 1] << 16) | ((i32) p[i + 2] << 8) | (i32) p[i + 3]
 define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) {
 ; BSWAP-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; BSWAP-NEXT:    movl (%ecx,%eax), %eax
@@ -769,21 +769,21 @@ define i32 @load_i32_by_i8_bswap_base_in
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; MOVBE-NEXT:    movbel (%ecx,%eax), %eax
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movslq %esi, %rax
 ; BSWAP64-NEXT:    movl (%rdi,%rax), %eax
 ; BSWAP64-NEXT:    bswapl %eax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movslq %esi, %rax
 ; MOVBE64-NEXT:    movbel (%rdi,%rax), %eax
 ; MOVBE64-NEXT:    retq
@@ -815,14 +815,14 @@ define i32 @load_i32_by_i8_bswap_base_in
 ; Verify that we don't crash handling shl i32 %conv57, 32
 define void @shift_i32_by_32(i8* %src1, i8* %src2, i64* %dst) {
 ; CHECK-LABEL: shift_i32_by_32:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl $-1, 4(%eax)
 ; CHECK-NEXT:    movl $-1, (%eax)
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: shift_i32_by_32:
-; CHECK64:       # BB#0: # %entry
+; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    movq $-1, (%rdx)
 ; CHECK64-NEXT:    retq
 entry:
@@ -846,26 +846,26 @@ declare i16 @llvm.bswap.i16(i16)
 ; (i32) bswap(p[1]) | (i32) bswap(p[0] << 16)
 define i32 @load_i32_by_bswap_i16(i32* %arg) {
 ; BSWAP-LABEL: load_i32_by_bswap_i16:
-; BSWAP:       # BB#0:
+; BSWAP:       # %bb.0:
 ; BSWAP-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; BSWAP-NEXT:    movl (%eax), %eax
 ; BSWAP-NEXT:    bswapl %eax
 ; BSWAP-NEXT:    retl
 ;
 ; MOVBE-LABEL: load_i32_by_bswap_i16:
-; MOVBE:       # BB#0:
+; MOVBE:       # %bb.0:
 ; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; MOVBE-NEXT:    movbel (%eax), %eax
 ; MOVBE-NEXT:    retl
 ;
 ; BSWAP64-LABEL: load_i32_by_bswap_i16:
-; BSWAP64:       # BB#0:
+; BSWAP64:       # %bb.0:
 ; BSWAP64-NEXT:    movl (%rdi), %eax
 ; BSWAP64-NEXT:    bswapl %eax
 ; BSWAP64-NEXT:    retq
 ;
 ; MOVBE64-LABEL: load_i32_by_bswap_i16:
-; MOVBE64:       # BB#0:
+; MOVBE64:       # %bb.0:
 ; MOVBE64-NEXT:    movbel (%rdi), %eax
 ; MOVBE64-NEXT:    retq
   %tmp = bitcast i32* %arg to i16*
@@ -885,13 +885,13 @@ define i32 @load_i32_by_bswap_i16(i32* %
 ; (i32) p[0] | (sext(p[1] << 16) to i32)
 define i32 @load_i32_by_sext_i16(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_sext_i16:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl (%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i16*
@@ -910,14 +910,14 @@ define i32 @load_i32_by_sext_i16(i32* %a
 ; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
 define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_base_offset_index:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl %esi, %eax
 ; CHECK64-NEXT:    movl 12(%rdi,%rax), %eax
 ; CHECK64-NEXT:    retq
@@ -955,14 +955,14 @@ define i32 @load_i32_by_i8_base_offset_i
 ; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
 define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl 13(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl %esi, %eax
 ; CHECK64-NEXT:    movl 13(%rdi,%rax), %eax
 ; CHECK64-NEXT:    retq
@@ -1011,14 +1011,14 @@ define i32 @load_i32_by_i8_base_offset_i
 ; to zext and aext loads.
 define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_zaext_loads:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_zaext_loads:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl %esi, %eax
 ; CHECK64-NEXT:    movl 12(%rdi,%rax), %eax
 ; CHECK64-NEXT:    retq
@@ -1067,14 +1067,14 @@ define i32 @load_i32_by_i8_zaext_loads(i
 ; (i32) p0[12] | ((i32) p1[12] << 8) | ((i32) p2[12] << 16) | ((i32) p3[12] << 24)
 define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_zsext_loads:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_zsext_loads:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movl %esi, %eax
 ; CHECK64-NEXT:    movl 12(%rdi,%rax), %eax
 ; CHECK64-NEXT:    retq
@@ -1115,7 +1115,7 @@ define i32 @load_i32_by_i8_zsext_loads(i
 ; (i32) p[0] | ((i32) p[1] << 8)
 define i32 @zext_load_i32_by_i8(i32* %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl (%eax), %ecx
 ; CHECK-NEXT:    movzbl 1(%eax), %eax
@@ -1124,7 +1124,7 @@ define i32 @zext_load_i32_by_i8(i32* %ar
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %ecx
 ; CHECK64-NEXT:    movzbl 1(%rdi), %eax
 ; CHECK64-NEXT:    shll $8, %eax
@@ -1146,7 +1146,7 @@ define i32 @zext_load_i32_by_i8(i32* %ar
 ; ((i32) p[0] << 8) | ((i32) p[1] << 16)
 define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl (%eax), %ecx
 ; CHECK-NEXT:    shll $8, %ecx
@@ -1156,7 +1156,7 @@ define i32 @zext_load_i32_by_i8_shl_8(i3
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_shl_8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %ecx
 ; CHECK64-NEXT:    shll $8, %ecx
 ; CHECK64-NEXT:    movzbl 1(%rdi), %eax
@@ -1180,7 +1180,7 @@ define i32 @zext_load_i32_by_i8_shl_8(i3
 ; ((i32) p[0] << 16) | ((i32) p[1] << 24)
 define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl (%eax), %ecx
 ; CHECK-NEXT:    shll $16, %ecx
@@ -1190,7 +1190,7 @@ define i32 @zext_load_i32_by_i8_shl_16(i
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_shl_16:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl (%rdi), %ecx
 ; CHECK64-NEXT:    shll $16, %ecx
 ; CHECK64-NEXT:    movzbl 1(%rdi), %eax
@@ -1214,7 +1214,7 @@ define i32 @zext_load_i32_by_i8_shl_16(i
 ; (i32) p[1] | ((i32) p[0] << 8)
 define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl 1(%eax), %ecx
 ; CHECK-NEXT:    movzbl (%eax), %eax
@@ -1223,7 +1223,7 @@ define i32 @zext_load_i32_by_i8_bswap(i3
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_bswap:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $8, %eax
@@ -1245,7 +1245,7 @@ define i32 @zext_load_i32_by_i8_bswap(i3
 ; ((i32) p[1] << 8) | ((i32) p[0] << 16)
 define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl 1(%eax), %ecx
 ; CHECK-NEXT:    shll $8, %ecx
@@ -1255,7 +1255,7 @@ define i32 @zext_load_i32_by_i8_bswap_sh
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_8:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
 ; CHECK64-NEXT:    shll $8, %ecx
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
@@ -1279,7 +1279,7 @@ define i32 @zext_load_i32_by_i8_bswap_sh
 ; ((i32) p[1] << 16) | ((i32) p[0] << 24)
 define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl 1(%eax), %ecx
 ; CHECK-NEXT:    shll $16, %ecx
@@ -1289,7 +1289,7 @@ define i32 @zext_load_i32_by_i8_bswap_sh
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_16:
-; CHECK64:       # BB#0:
+; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
 ; CHECK64-NEXT:    shll $16, %ecx
 ; CHECK64-NEXT:    movzbl (%rdi), %eax

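(For context, illustrative only and not part of the patch: the OR-of-shifted-bytes idiom the comments above spell out is the portable little-endian load that DAGCombiner folds into a single wide load. A minimal C sketch, helper name hypothetical:)

  #include <stdint.h>

  /* Computes p[0] | p[1]<<8 | p[2]<<16 | p[3]<<24, as in the test
     comments above; on x86 the four byte loads combine into one
     32-bit load (the byte-swapped variants get bswap or movbe). */
  static uint32_t load_le32(const uint8_t *p) {
    return (uint32_t)p[0]        | ((uint32_t)p[1] << 8) |
          ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
  }
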
Modified: llvm/trunk/test/CodeGen/X86/logical-load-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/logical-load-fold.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/logical-load-fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/logical-load-fold.ll Mon Dec  4 09:18:51 2017
@@ -12,14 +12,14 @@
 
 define double @load_double_no_fold(double %x, double %y) {
 ; SSE2-LABEL: load_double_no_fold:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    cmplesd %xmm0, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    andpd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_double_no_fold:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
@@ -33,14 +33,14 @@ define double @load_double_no_fold(doubl
 
 define float @load_float_no_fold(float %x, float %y) {
 ; SSE2-LABEL: load_float_no_fold:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    cmpless %xmm0, %xmm1
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    andps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_float_no_fold:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/longlong-deadload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/longlong-deadload.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/longlong-deadload.ll (original)
+++ llvm/trunk/test/CodeGen/X86/longlong-deadload.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @test(i64* %P) nounwind  {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl (%eax), %ecx
 ; CHECK-NEXT:    xorl $1, %ecx

Modified: llvm/trunk/test/CodeGen/X86/loop-search.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-search.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-search.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-search.ll Mon Dec  4 09:18:51 2017
@@ -6,10 +6,10 @@
 
 define zeroext i1 @search(i32 %needle, i32* nocapture readonly %haystack, i32 %count) {
 ; CHECK-LABEL: search:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    testl %edx, %edx
 ; CHECK-NEXT:    jle LBB0_1
-; CHECK-NEXT:  ## BB#4: ## %for.body.preheader
+; CHECK-NEXT:  ## %bb.4: ## %for.body.preheader
 ; CHECK-NEXT:    movslq %edx, %rax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -17,13 +17,13 @@ define zeroext i1 @search(i32 %needle, i
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpl %edi, (%rsi,%rcx,4)
 ; CHECK-NEXT:    je LBB0_6
-; CHECK-NEXT:  ## BB#2: ## %for.cond
+; CHECK-NEXT:  ## %bb.2: ## %for.cond
 ; CHECK-NEXT:    ## in Loop: Header=BB0_5 Depth=1
 ; CHECK-NEXT:    incq %rcx
 ; CHECK-NEXT:    cmpq %rax, %rcx
 ; CHECK-NEXT:    jl LBB0_5
-;            ### FIXME: BB#3 and LBB0_1 should be merged
-; CHECK-NEXT:  ## BB#3:
+;            ### FIXME: %bb.3 and LBB0_1 should be merged
+; CHECK-NEXT:  ## %bb.3:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    ## kill: %al<def> %al<kill> %eax<kill>
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/lower-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lower-bitcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lower-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lower-bitcast.ll Mon Dec  4 09:18:51 2017
@@ -8,14 +8,14 @@
 
 define double @test1(double %A) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; CHECK-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test1:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -26,12 +26,12 @@ define double @test1(double %A) {
 
 define double @test2(double %A, double %B) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    paddd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test2:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    paddd %xmm1, %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <2 x i32>
@@ -43,14 +43,14 @@ define double @test2(double %A, double %
 
 define i64 @test3(i64 %A) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rdi, %xmm0
 ; CHECK-NEXT:    addps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movq %xmm0, %rax
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test3:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    movq %rdi, %xmm0
 ; CHECK-WIDE-NEXT:    addps {{.*}}(%rip), %xmm0
 ; CHECK-WIDE-NEXT:    movq %xmm0, %rax
@@ -66,7 +66,7 @@ define i64 @test3(i64 %A) {
 
 define i64 @test4(i64 %A) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq %rdi, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
 ; CHECK-NEXT:    paddd {{.*}}(%rip), %xmm0
@@ -75,7 +75,7 @@ define i64 @test4(i64 %A) {
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test4:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    movq %rdi, %xmm0
 ; CHECK-WIDE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; CHECK-WIDE-NEXT:    movq %xmm0, %rax
@@ -88,12 +88,12 @@ define i64 @test4(i64 %A) {
 
 define double @test5(double %A) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test5:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    addps {{.*}}(%rip), %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <2 x float>
@@ -107,14 +107,14 @@ define double @test5(double %A) {
 
 define double @test6(double %A) {
 ; CHECK-LABEL: test6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; CHECK-NEXT:    paddw {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test6:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    paddw {{.*}}(%rip), %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -125,12 +125,12 @@ define double @test6(double %A) {
 
 define double @test7(double %A, double %B) {
 ; CHECK-LABEL: test7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    paddw %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test7:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    paddw %xmm1, %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <4 x i16>
@@ -146,14 +146,14 @@ define double @test7(double %A, double %
 
 define double @test8(double %A) {
 ; CHECK-LABEL: test8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-NEXT:    paddb {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test8:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    paddb {{.*}}(%rip), %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>
@@ -164,12 +164,12 @@ define double @test8(double %A) {
 
 define double @test9(double %A, double %B) {
 ; CHECK-LABEL: test9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    paddb %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
 ; CHECK-WIDE-LABEL: test9:
-; CHECK-WIDE:       # BB#0:
+; CHECK-WIDE:       # %bb.0:
 ; CHECK-WIDE-NEXT:    paddb %xmm1, %xmm0
 ; CHECK-WIDE-NEXT:    retq
   %1 = bitcast double %A to <8 x i8>

Modified: llvm/trunk/test/CodeGen/X86/lower-vec-shift-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lower-vec-shift-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lower-vec-shift-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lower-vec-shift-2.ll Mon Dec  4 09:18:51 2017
@@ -4,14 +4,14 @@
 
 define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-LABEL: test1:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pextrw $0, %xmm1, %eax
 ; SSE2-NEXT:    movd %eax, %xmm1
 ; SSE2-NEXT:    psllw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test1:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX-NEXT:    vpsllw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -23,14 +23,14 @@ entry:
 
 define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: test2:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
 ; SSE2-NEXT:    pslld %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test2:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
 ; AVX-NEXT:    vpslld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -42,12 +42,12 @@ entry:
 
 define <2 x i64> @test3(<2 x i64> %A, <2 x i64> %B) {
 ; SSE2-LABEL: test3:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psllq %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test3:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -58,14 +58,14 @@ entry:
 
 define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-LABEL: test4:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pextrw $0, %xmm1, %eax
 ; SSE2-NEXT:    movd %eax, %xmm1
 ; SSE2-NEXT:    psrlw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test4:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -77,14 +77,14 @@ entry:
 
 define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: test5:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
 ; SSE2-NEXT:    psrld %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test5:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
 ; AVX-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -96,12 +96,12 @@ entry:
 
 define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) {
 ; SSE2-LABEL: test6:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    psrlq %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test6:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
@@ -112,14 +112,14 @@ entry:
 
 define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-LABEL: test7:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pextrw $0, %xmm1, %eax
 ; SSE2-NEXT:    movd %eax, %xmm1
 ; SSE2-NEXT:    psraw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test7:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX-NEXT:    vpsraw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -131,14 +131,14 @@ entry:
 
 define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
 ; SSE2-LABEL: test8:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
 ; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
 ; SSE2-NEXT:    psrad %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: test8:
-; AVX:       # BB#0: # %entry
+; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
 ; AVX-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define <8 x i16> @test1(<8 x i16> %a) {
 ; SSE-LABEL: test1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrlw $3, %xmm1
 ; SSE-NEXT:    psrlw $2, %xmm0
@@ -18,14 +18,14 @@ define <8 x i16> @test1(<8 x i16> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test1:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrlw $3, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrlw $2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test1:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsrlw $3, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsrlw $2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -36,7 +36,7 @@ define <8 x i16> @test1(<8 x i16> %a) {
 
 define <8 x i16> @test2(<8 x i16> %a) {
 ; SSE-LABEL: test2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrlw $3, %xmm1
 ; SSE-NEXT:    psrlw $2, %xmm0
@@ -44,14 +44,14 @@ define <8 x i16> @test2(<8 x i16> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrlw $2, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsrlw $2, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
@@ -62,7 +62,7 @@ define <8 x i16> @test2(<8 x i16> %a) {
 
 define <4 x i32> @test3(<4 x i32> %a) {
 ; SSE-LABEL: test3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $3, %xmm1
 ; SSE-NEXT:    psrld $2, %xmm0
@@ -70,14 +70,14 @@ define <4 x i32> @test3(<4 x i32> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrld $3, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrld $2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
@@ -86,7 +86,7 @@ define <4 x i32> @test3(<4 x i32> %a) {
 
 define <4 x i32> @test4(<4 x i32> %a) {
 ; SSE-LABEL: test4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrld $3, %xmm1
 ; SSE-NEXT:    psrld $2, %xmm0
@@ -94,14 +94,14 @@ define <4 x i32> @test4(<4 x i32> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test4:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrld $2, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrld $3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test4:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
@@ -110,7 +110,7 @@ define <4 x i32> @test4(<4 x i32> %a) {
 
 define <8 x i16> @test5(<8 x i16> %a) {
 ; SSE-LABEL: test5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psraw $3, %xmm1
 ; SSE-NEXT:    psraw $2, %xmm0
@@ -118,14 +118,14 @@ define <8 x i16> @test5(<8 x i16> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test5:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsraw $3, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsraw $2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test5:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsraw $3, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsraw $2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -136,7 +136,7 @@ define <8 x i16> @test5(<8 x i16> %a) {
 
 define <8 x i16> @test6(<8 x i16> %a) {
 ; SSE-LABEL: test6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psraw $3, %xmm1
 ; SSE-NEXT:    psraw $2, %xmm0
@@ -144,14 +144,14 @@ define <8 x i16> @test6(<8 x i16> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test6:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsraw $2, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsraw $3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test6:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsraw $2, %xmm0, %xmm1
 ; AVX2-NEXT:    vpsraw $3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
@@ -162,7 +162,7 @@ define <8 x i16> @test6(<8 x i16> %a) {
 
 define <4 x i32> @test7(<4 x i32> %a) {
 ; SSE-LABEL: test7:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $3, %xmm1
 ; SSE-NEXT:    psrad $2, %xmm0
@@ -170,14 +170,14 @@ define <4 x i32> @test7(<4 x i32> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test7:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrad $3, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrad $2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test7:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %lshr = ashr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
@@ -186,7 +186,7 @@ define <4 x i32> @test7(<4 x i32> %a) {
 
 define <4 x i32> @test8(<4 x i32> %a) {
 ; SSE-LABEL: test8:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $3, %xmm1
 ; SSE-NEXT:    psrad $2, %xmm0
@@ -194,14 +194,14 @@ define <4 x i32> @test8(<4 x i32> %a) {
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: test8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpsrad $2, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsrad $3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    retq
   %lshr = ashr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>

Modified: llvm/trunk/test/CodeGen/X86/lower-vec-shuffle-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lower-vec-shuffle-bug.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lower-vec-shuffle-bug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lower-vec-shuffle-bug.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x double> @test1(<4 x double> %A, <4 x double> %B) {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -13,7 +13,7 @@ entry:
 
 define <4 x double> @test2(<4 x double> %A, <4 x double> %B) {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -23,7 +23,7 @@ entry:
 
 define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
 ; CHECK-LABEL: test3:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -33,7 +33,7 @@ entry:
 
 define <4 x double> @test4(<4 x double> %A, <4 x double> %B) {
 ; CHECK-LABEL: test4:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/lwp-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lwp-intrinsics-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lwp-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lwp-intrinsics-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i8 @test_lwpins64_rri(i64 %a0, i32 %a1) nounwind {
 ; X64-LABEL: test_lwpins64_rri:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lwpins $-1985229329, %esi, %rdi # imm = 0x89ABCDEF
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
@@ -17,7 +17,7 @@ define i8 @test_lwpins64_rri(i64 %a0, i3
 
 define i8 @test_lwpins64_rmi(i64 %a0, i32 *%p1) nounwind {
 ; X64-LABEL: test_lwpins64_rmi:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lwpins $1985229328, (%rsi), %rdi # imm = 0x76543210
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
@@ -28,7 +28,7 @@ define i8 @test_lwpins64_rmi(i64 %a0, i3
 
 define void @test_lwpval64_rri(i64 %a0, i32 %a1) nounwind {
 ; X64-LABEL: test_lwpval64_rri:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lwpval $-19088744, %esi, %rdi # imm = 0xFEDCBA98
 ; X64-NEXT:    retq
   tail call void @llvm.x86.lwpval64(i64 %a0, i32 %a1, i32 4275878552)
@@ -37,7 +37,7 @@ define void @test_lwpval64_rri(i64 %a0,
 
 define void @test_lwpval64_rmi(i64 %a0, i32 *%p1) nounwind {
 ; X64-LABEL: test_lwpval64_rmi:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lwpval $305419896, (%rsi), %rdi # imm = 0x12345678
 ; X64-NEXT:    retq
   %a1 = load i32, i32 *%p1

Modified: llvm/trunk/test/CodeGen/X86/lwp-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lwp-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lwp-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lwp-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -12,13 +12,13 @@
 
 define void @test_llwpcb(i8 *%a0) nounwind {
 ; X86-LABEL: test_llwpcb:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    llwpcb %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llwpcb:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    llwpcb %rdi
 ; X64-NEXT:    retq
   tail call void @llvm.x86.llwpcb(i8 *%a0)
@@ -27,12 +27,12 @@ define void @test_llwpcb(i8 *%a0) nounwi
 
 define i8* @test_slwpcb(i8 *%a0) nounwind {
 ; X86-LABEL: test_slwpcb:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    slwpcb %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_slwpcb:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    slwpcb %rax
 ; X64-NEXT:    retq
   %1 = tail call i8* @llvm.x86.slwpcb()
@@ -41,7 +41,7 @@ define i8* @test_slwpcb(i8 *%a0) nounwin
 
 define i8 @test_lwpins32_rri(i32 %a0, i32 %a1) nounwind {
 ; X86-LABEL: test_lwpins32_rri:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    addl %ecx, %ecx
@@ -50,7 +50,7 @@ define i8 @test_lwpins32_rri(i32 %a0, i3
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_lwpins32_rri:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addl %esi, %esi
 ; X64-NEXT:    lwpins $-1985229329, %esi, %edi # imm = 0x89ABCDEF
 ; X64-NEXT:    setb %al
@@ -62,7 +62,7 @@ define i8 @test_lwpins32_rri(i32 %a0, i3
 
 define i8 @test_lwpins32_rmi(i32 %a0, i32 *%p1) nounwind {
 ; X86-LABEL: test_lwpins32_rmi:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    lwpins $1985229328, (%eax), %ecx # imm = 0x76543210
@@ -70,7 +70,7 @@ define i8 @test_lwpins32_rmi(i32 %a0, i3
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_lwpins32_rmi:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lwpins $1985229328, (%rsi), %edi # imm = 0x76543210
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
@@ -81,7 +81,7 @@ define i8 @test_lwpins32_rmi(i32 %a0, i3
 
 define void @test_lwpval32_rri(i32 %a0, i32 %a1) nounwind {
 ; X86-LABEL: test_lwpval32_rri:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    addl %ecx, %ecx
@@ -89,7 +89,7 @@ define void @test_lwpval32_rri(i32 %a0,
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_lwpval32_rri:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addl %esi, %esi
 ; X64-NEXT:    lwpval $-19088744, %esi, %edi # imm = 0xFEDCBA98
 ; X64-NEXT:    retq
@@ -100,14 +100,14 @@ define void @test_lwpval32_rri(i32 %a0,
 
 define void @test_lwpval32_rmi(i32 %a0, i32 *%p1) nounwind {
 ; X86-LABEL: test_lwpval32_rmi:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    lwpval $305419896, (%eax), %ecx # imm = 0x12345678
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_lwpval32_rmi:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    lwpval $305419896, (%rsi), %edi # imm = 0x12345678
 ; X64-NEXT:    retq
   %a1 = load i32, i32 *%p1

Modified: llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
 ; GENERIC-LABEL: test_ctlz_i16:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    lzcntw (%rsi), %cx
 ; GENERIC-NEXT:    lzcntw %di, %ax
 ; GENERIC-NEXT:    orl %ecx, %eax # sched: [1:0.33]
@@ -17,7 +17,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_ctlz_i16:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    lzcntw (%rsi), %cx # sched: [3:1.00]
 ; HASWELL-NEXT:    lzcntw %di, %ax # sched: [3:1.00]
 ; HASWELL-NEXT:    orl %ecx, %eax # sched: [1:0.25]
@@ -25,7 +25,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_ctlz_i16:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    lzcntw (%rsi), %cx # sched: [8:1.00]
 ; BROADWELL-NEXT:    lzcntw %di, %ax # sched: [3:1.00]
 ; BROADWELL-NEXT:    orl %ecx, %eax # sched: [1:0.25]
@@ -33,7 +33,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_ctlz_i16:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    lzcntw (%rsi), %cx # sched: [8:1.00]
 ; SKYLAKE-NEXT:    lzcntw %di, %ax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    orl %ecx, %eax # sched: [1:0.25]
@@ -41,7 +41,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_ctlz_i16:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    lzcntw (%rsi), %cx
 ; BTVER2-NEXT:    lzcntw %di, %ax
 ; BTVER2-NEXT:    orl %ecx, %eax # sched: [1:0.50]
@@ -49,7 +49,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_ctlz_i16:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    lzcntw (%rsi), %cx # sched: [6:0.50]
 ; ZNVER1-NEXT:    lzcntw %di, %ax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
@@ -65,42 +65,42 @@ declare i16 @llvm.ctlz.i16(i16, i1)
 
 define i32 @test_ctlz_i32(i32 %a0, i32 *%a1) {
 ; GENERIC-LABEL: test_ctlz_i32:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    lzcntl (%rsi), %ecx
 ; GENERIC-NEXT:    lzcntl %edi, %eax
 ; GENERIC-NEXT:    orl %ecx, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_ctlz_i32:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    lzcntl (%rsi), %ecx # sched: [3:1.00]
 ; HASWELL-NEXT:    lzcntl %edi, %eax # sched: [3:1.00]
 ; HASWELL-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_ctlz_i32:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    lzcntl (%rsi), %ecx # sched: [8:1.00]
 ; BROADWELL-NEXT:    lzcntl %edi, %eax # sched: [3:1.00]
 ; BROADWELL-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_ctlz_i32:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    lzcntl (%rsi), %ecx # sched: [8:1.00]
 ; SKYLAKE-NEXT:    lzcntl %edi, %eax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_ctlz_i32:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    lzcntl (%rsi), %ecx
 ; BTVER2-NEXT:    lzcntl %edi, %eax
 ; BTVER2-NEXT:    orl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_ctlz_i32:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    lzcntl (%rsi), %ecx # sched: [6:0.50]
 ; ZNVER1-NEXT:    lzcntl %edi, %eax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
@@ -115,42 +115,42 @@ declare i32 @llvm.ctlz.i32(i32, i1)
 
 define i64 @test_ctlz_i64(i64 %a0, i64 *%a1) {
 ; GENERIC-LABEL: test_ctlz_i64:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    lzcntq (%rsi), %rcx
 ; GENERIC-NEXT:    lzcntq %rdi, %rax
 ; GENERIC-NEXT:    orq %rcx, %rax # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_ctlz_i64:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    lzcntq (%rsi), %rcx # sched: [3:1.00]
 ; HASWELL-NEXT:    lzcntq %rdi, %rax # sched: [3:1.00]
 ; HASWELL-NEXT:    orq %rcx, %rax # sched: [1:0.25]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_ctlz_i64:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    lzcntq (%rsi), %rcx # sched: [8:1.00]
 ; BROADWELL-NEXT:    lzcntq %rdi, %rax # sched: [3:1.00]
 ; BROADWELL-NEXT:    orq %rcx, %rax # sched: [1:0.25]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_ctlz_i64:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    lzcntq (%rsi), %rcx # sched: [8:1.00]
 ; SKYLAKE-NEXT:    lzcntq %rdi, %rax # sched: [3:1.00]
 ; SKYLAKE-NEXT:    orq %rcx, %rax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_ctlz_i64:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    lzcntq (%rsi), %rcx
 ; BTVER2-NEXT:    lzcntq %rdi, %rax
 ; BTVER2-NEXT:    orq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_ctlz_i64:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    lzcntq (%rsi), %rcx # sched: [6:0.50]
 ; ZNVER1-NEXT:    lzcntq %rdi, %rax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orq %rcx, %rax # sched: [1:0.25]

Modified: llvm/trunk/test/CodeGen/X86/lzcnt-zext-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lzcnt-zext-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lzcnt-zext-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lzcnt-zext-cmp.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 ; Test one 32-bit input, output is 32-bit, no transformations expected.
 define i32 @test_zext_cmp0(i32 %a) {
 ; ALL-LABEL: test_zext_cmp0:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    testl %edi, %edi
 ; ALL-NEXT:    sete %al
@@ -23,7 +23,7 @@ entry:
 ; Test two 32-bit inputs, output is 32-bit.
 define i32 @test_zext_cmp1(i32 %a, i32 %b) {
 ; FASTLZCNT-LABEL: test_zext_cmp1:
-; FASTLZCNT:       # BB#0:
+; FASTLZCNT:       # %bb.0:
 ; FASTLZCNT-NEXT:    lzcntl %edi, %ecx
 ; FASTLZCNT-NEXT:    lzcntl %esi, %eax
 ; FASTLZCNT-NEXT:    orl %ecx, %eax
@@ -31,7 +31,7 @@ define i32 @test_zext_cmp1(i32 %a, i32 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp1:
-; NOFASTLZCNT:       # BB#0:
+; NOFASTLZCNT:       # %bb.0:
 ; NOFASTLZCNT-NEXT:    testl %edi, %edi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testl %esi, %esi
@@ -49,7 +49,7 @@ define i32 @test_zext_cmp1(i32 %a, i32 %
 ; Test two 64-bit inputs, output is 64-bit.
 define i64 @test_zext_cmp2(i64 %a, i64 %b) {
 ; FASTLZCNT-LABEL: test_zext_cmp2:
-; FASTLZCNT:       # BB#0:
+; FASTLZCNT:       # %bb.0:
 ; FASTLZCNT-NEXT:    lzcntq %rdi, %rcx
 ; FASTLZCNT-NEXT:    lzcntq %rsi, %rax
 ; FASTLZCNT-NEXT:    orl %ecx, %eax
@@ -57,7 +57,7 @@ define i64 @test_zext_cmp2(i64 %a, i64 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp2:
-; NOFASTLZCNT:       # BB#0:
+; NOFASTLZCNT:       # %bb.0:
 ; NOFASTLZCNT-NEXT:    testq %rdi, %rdi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testq %rsi, %rsi
@@ -77,7 +77,7 @@ define i64 @test_zext_cmp2(i64 %a, i64 %
 ; upper 16-bits, adding one more instruction.
 define i16 @test_zext_cmp3(i16 %a, i16 %b) {
 ; ALL-LABEL: test_zext_cmp3:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    testw %di, %di
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    testw %si, %si
@@ -96,7 +96,7 @@ define i16 @test_zext_cmp3(i16 %a, i16 %
 ; Test two 32-bit inputs, output is 64-bit.
 define i64 @test_zext_cmp4(i32 %a, i32 %b) {
 ; FASTLZCNT-LABEL: test_zext_cmp4:
-; FASTLZCNT:       # BB#0: # %entry
+; FASTLZCNT:       # %bb.0: # %entry
 ; FASTLZCNT-NEXT:    lzcntl %edi, %ecx
 ; FASTLZCNT-NEXT:    lzcntl %esi, %eax
 ; FASTLZCNT-NEXT:    orl %ecx, %eax
@@ -104,7 +104,7 @@ define i64 @test_zext_cmp4(i32 %a, i32 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp4:
-; NOFASTLZCNT:       # BB#0: # %entry
+; NOFASTLZCNT:       # %bb.0: # %entry
 ; NOFASTLZCNT-NEXT:    testl %edi, %edi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testl %esi, %esi
@@ -123,7 +123,7 @@ entry:
 ; Test two 64-bit inputs, output is 32-bit.
 define i32 @test_zext_cmp5(i64 %a, i64 %b) {
 ; FASTLZCNT-LABEL: test_zext_cmp5:
-; FASTLZCNT:       # BB#0: # %entry
+; FASTLZCNT:       # %bb.0: # %entry
 ; FASTLZCNT-NEXT:    lzcntq %rdi, %rcx
 ; FASTLZCNT-NEXT:    lzcntq %rsi, %rax
 ; FASTLZCNT-NEXT:    orl %ecx, %eax
@@ -132,7 +132,7 @@ define i32 @test_zext_cmp5(i64 %a, i64 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp5:
-; NOFASTLZCNT:       # BB#0: # %entry
+; NOFASTLZCNT:       # %bb.0: # %entry
 ; NOFASTLZCNT-NEXT:    testq %rdi, %rdi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testq %rsi, %rsi
@@ -151,7 +151,7 @@ entry:
 ; Test three 32-bit inputs, output is 32-bit.
 define i32 @test_zext_cmp6(i32 %a, i32 %b, i32 %c) {
 ; FASTLZCNT-LABEL: test_zext_cmp6:
-; FASTLZCNT:       # BB#0: # %entry
+; FASTLZCNT:       # %bb.0: # %entry
 ; FASTLZCNT-NEXT:    lzcntl %edi, %eax
 ; FASTLZCNT-NEXT:    lzcntl %esi, %ecx
 ; FASTLZCNT-NEXT:    orl %eax, %ecx
@@ -161,7 +161,7 @@ define i32 @test_zext_cmp6(i32 %a, i32 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp6:
-; NOFASTLZCNT:       # BB#0: # %entry
+; NOFASTLZCNT:       # %bb.0: # %entry
 ; NOFASTLZCNT-NEXT:    testl %edi, %edi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testl %esi, %esi
@@ -186,7 +186,7 @@ entry:
 ; %.cmp2 inputs' order is inverted.
 define i32 @test_zext_cmp7(i32 %a, i32 %b, i32 %c) {
 ; FASTLZCNT-LABEL: test_zext_cmp7:
-; FASTLZCNT:       # BB#0: # %entry
+; FASTLZCNT:       # %bb.0: # %entry
 ; FASTLZCNT-NEXT:    lzcntl %edi, %eax
 ; FASTLZCNT-NEXT:    lzcntl %esi, %ecx
 ; FASTLZCNT-NEXT:    orl %eax, %ecx
@@ -196,7 +196,7 @@ define i32 @test_zext_cmp7(i32 %a, i32 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp7:
-; NOFASTLZCNT:       # BB#0: # %entry
+; NOFASTLZCNT:       # %bb.0: # %entry
 ; NOFASTLZCNT-NEXT:    testl %edi, %edi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testl %esi, %esi
@@ -220,7 +220,7 @@ entry:
 ; Test four 32-bit inputs, output is 32-bit.
 define i32 @test_zext_cmp8(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; FASTLZCNT-LABEL: test_zext_cmp8:
-; FASTLZCNT:       # BB#0: # %entry
+; FASTLZCNT:       # %bb.0: # %entry
 ; FASTLZCNT-NEXT:    lzcntl %edi, %eax
 ; FASTLZCNT-NEXT:    lzcntl %esi, %esi
 ; FASTLZCNT-NEXT:    lzcntl %edx, %edx
@@ -232,7 +232,7 @@ define i32 @test_zext_cmp8(i32 %a, i32 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp8:
-; NOFASTLZCNT:       # BB#0: # %entry
+; NOFASTLZCNT:       # %bb.0: # %entry
 ; NOFASTLZCNT-NEXT:    testl %edi, %edi
 ; NOFASTLZCNT-NEXT:    sete %dil
 ; NOFASTLZCNT-NEXT:    testl %esi, %esi
@@ -261,7 +261,7 @@ entry:
 ; Test one 32-bit input, one 64-bit input, output is 32-bit.
 define i32 @test_zext_cmp9(i32 %a, i64 %b) {
 ; FASTLZCNT-LABEL: test_zext_cmp9:
-; FASTLZCNT:       # BB#0: # %entry
+; FASTLZCNT:       # %bb.0: # %entry
 ; FASTLZCNT-NEXT:    lzcntq %rsi, %rax
 ; FASTLZCNT-NEXT:    lzcntl %edi, %ecx
 ; FASTLZCNT-NEXT:    shrl $5, %ecx
@@ -271,7 +271,7 @@ define i32 @test_zext_cmp9(i32 %a, i64 %
 ; FASTLZCNT-NEXT:    retq
 ;
 ; NOFASTLZCNT-LABEL: test_zext_cmp9:
-; NOFASTLZCNT:       # BB#0: # %entry
+; NOFASTLZCNT:       # %bb.0: # %entry
 ; NOFASTLZCNT-NEXT:    testl %edi, %edi
 ; NOFASTLZCNT-NEXT:    sete %al
 ; NOFASTLZCNT-NEXT:    testq %rsi, %rsi
@@ -290,7 +290,7 @@ entry:
 ; Test 2 128-bit inputs, output is 32-bit, no transformations expected.
 define i32 @test_zext_cmp10(i64 %a.coerce0, i64 %a.coerce1, i64 %b.coerce0, i64 %b.coerce1) {
 ; ALL-LABEL: test_zext_cmp10:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    orq %rsi, %rdi
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    orq %rcx, %rdx
@@ -318,7 +318,7 @@ entry:
 define i32 @test_zext_cmp11(double %a, double %b) "no-nans-fp-math"="true" {
 ;
 ; ALL-LABEL: test_zext_cmp11:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; ALL-NEXT:    vucomisd %xmm2, %xmm0
 ; ALL-NEXT:    sete %al

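(Aside, illustrative only: the FASTLZCNT lowering above leans on the identity that lzcnt of an N-bit zero is N, so bit log2(N) of the count is exactly the (x == 0) predicate. A C sketch of the 32-bit case; __builtin_clz is undefined at 0, hence the explicit guard standing in for true lzcnt semantics:)

  #include <stdint.h>

  /* Returns 1 iff x == 0, computed the way the lzcnt+shr sequence in
     the checks above does: lzcnt(0) = 32, so bit 5 of the count is
     set only for a zero input. */
  static uint32_t iszero_via_lzcnt(uint32_t x) {
    unsigned n = x ? (unsigned)__builtin_clz(x) : 32u;
    return n >> 5;
  }
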
Modified: llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll Mon Dec  4 09:18:51 2017
@@ -5,14 +5,14 @@
 
 define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
 ; SSE-LABEL: reassociate_and_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    pand %xmm3, %xmm2
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_and_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpand %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -26,14 +26,14 @@ define <4 x i32> @reassociate_and_v4i32(
 
 define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
 ; SSE-LABEL: reassociate_or_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    por %xmm3, %xmm2
 ; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_or_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
@@ -47,14 +47,14 @@ define <4 x i32> @reassociate_or_v4i32(<
 
 define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
 ; SSE-LABEL: reassociate_xor_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    pxor %xmm3, %xmm2
 ; SSE-NEXT:    pxor %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_xor_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
@@ -70,7 +70,7 @@ define <4 x i32> @reassociate_xor_v4i32(
 
 define <8 x i32> @reassociate_and_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
 ; AVX-LABEL: reassociate_and_v8i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vpand %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -84,7 +84,7 @@ define <8 x i32> @reassociate_and_v8i32(
 
 define <8 x i32> @reassociate_or_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
 ; AVX-LABEL: reassociate_or_v8i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vpor %ymm1, %ymm0, %ymm0
@@ -98,7 +98,7 @@ define <8 x i32> @reassociate_or_v8i32(<
 
 define <8 x i32> @reassociate_xor_v8i32(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, <8 x i32> %x3) {
 ; AVX-LABEL: reassociate_xor_v8i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vpxor %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vpxor %ymm1, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/machine-combiner-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-combiner-int.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-combiner-int.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-combiner-int.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define i16 @reassociate_muls_i16(i16 %x0, i16 %x1, i16 %x2, i16 %x3) {
 ; CHECK-LABEL: reassociate_muls_i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill
 ; CHECK-NEXT:    # kill
 ; CHECK-NEXT:    leal   (%rdi,%rsi), %eax
@@ -25,7 +25,7 @@ define i16 @reassociate_muls_i16(i16 %x0
 
 define i32 @reassociate_muls_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
 ; CHECK-LABEL: reassociate_muls_i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill
 ; CHECK-NEXT:    # kill
 ; CHECK-NEXT:    leal   (%rdi,%rsi), %eax
@@ -45,7 +45,7 @@ define i32 @reassociate_muls_i32(i32 %x0
 
 define i64 @reassociate_muls_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
 ; CHECK-LABEL: reassociate_muls_i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    leaq   (%rdi,%rsi), %rax
 ; CHECK-NEXT:    imulq  %rcx, %rdx
 ; CHECK-NEXT:    imulq  %rdx, %rax
@@ -61,7 +61,7 @@ define i64 @reassociate_muls_i64(i64 %x0
 
 define i8 @reassociate_ands_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
 ; CHECK-LABEL: reassociate_ands_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subb  %sil, %dil
 ; CHECK-NEXT:    andb  %cl, %dl
 ; CHECK-NEXT:    andb  %dil, %dl
@@ -77,7 +77,7 @@ define i8 @reassociate_ands_i8(i8 %x0, i
 
 define i32 @reassociate_ands_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
 ; CHECK-LABEL: reassociate_ands_i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subl  %esi, %edi
 ; CHECK-NEXT:    andl  %ecx, %edx
 ; CHECK-NEXT:    andl  %edi, %edx
@@ -91,7 +91,7 @@ define i32 @reassociate_ands_i32(i32 %x0
 
 define i64 @reassociate_ands_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
 ; CHECK-LABEL: reassociate_ands_i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq  %rsi, %rdi
 ; CHECK-NEXT:    andq  %rcx, %rdx
 ; CHECK-NEXT:    andq  %rdi, %rdx
@@ -108,7 +108,7 @@ define i64 @reassociate_ands_i64(i64 %x0
 
 define i8 @reassociate_ors_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
 ; CHECK-LABEL: reassociate_ors_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subb  %sil, %dil
 ; CHECK-NEXT:    orb   %cl, %dl
 ; CHECK-NEXT:    orb   %dil, %dl
@@ -124,7 +124,7 @@ define i8 @reassociate_ors_i8(i8 %x0, i8
 
 define i32 @reassociate_ors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
 ; CHECK-LABEL: reassociate_ors_i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subl  %esi, %edi
 ; CHECK-NEXT:    orl   %ecx, %edx
 ; CHECK-NEXT:    orl   %edi, %edx
@@ -138,7 +138,7 @@ define i32 @reassociate_ors_i32(i32 %x0,
 
 define i64 @reassociate_ors_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
 ; CHECK-LABEL: reassociate_ors_i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq  %rsi, %rdi
 ; CHECK-NEXT:    orq   %rcx, %rdx
 ; CHECK-NEXT:    orq   %rdi, %rdx
@@ -155,7 +155,7 @@ define i64 @reassociate_ors_i64(i64 %x0,
 
 define i8 @reassociate_xors_i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3) {
 ; CHECK-LABEL: reassociate_xors_i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subb  %sil, %dil
 ; CHECK-NEXT:    xorb  %cl, %dl
 ; CHECK-NEXT:    xorb  %dil, %dl
@@ -171,7 +171,7 @@ define i8 @reassociate_xors_i8(i8 %x0, i
 
 define i32 @reassociate_xors_i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
 ; CHECK-LABEL: reassociate_xors_i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subl  %esi, %edi
 ; CHECK-NEXT:    xorl  %ecx, %edx
 ; CHECK-NEXT:    xorl  %edi, %edx
@@ -185,7 +185,7 @@ define i32 @reassociate_xors_i32(i32 %x0
 
 define i64 @reassociate_xors_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
 ; CHECK-LABEL: reassociate_xors_i64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq  %rsi, %rdi
 ; CHECK-NEXT:    xorq  %rcx, %rdx
 ; CHECK-NEXT:    xorq  %rdi, %rdx

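(Aside, illustrative only: the machine-combiner checks above all share one shape — a serial chain ((x0 op x1) op x2) op x3 is rebalanced so the two inner operations are independent. In C terms, with hypothetical function names:)

  /* chain_i32 has three dependent adds on the critical path;
     balanced_i32 computes the same value but the two inner adds can
     issue in parallel, shortening the path to two operations. */
  int chain_i32(int a, int b, int c, int d)    { return ((a + b) + c) + d; }
  int balanced_i32(int a, int b, int c, int d) { return (a + b) + (c + d); }
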
Modified: llvm/trunk/test/CodeGen/X86/machine-combiner.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-combiner.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-combiner.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-combiner.ll Mon Dec  4 09:18:51 2017
@@ -11,14 +11,14 @@
 
 define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -31,14 +31,14 @@ define float @reassociate_adds1(float %x
 
 define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -51,14 +51,14 @@ define float @reassociate_adds2(float %x
 
 define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -71,14 +71,14 @@ define float @reassociate_adds3(float %x
 
 define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -94,7 +94,7 @@ define float @reassociate_adds4(float %x
 
 define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
 ; SSE-LABEL: reassociate_adds5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
@@ -105,7 +105,7 @@ define float @reassociate_adds5(float %x
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -131,14 +131,14 @@ define float @reassociate_adds5(float %x
 
 define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -153,14 +153,14 @@ define float @reassociate_adds6(float %x
 
 define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_muls1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    mulss %xmm3, %xmm2
 ; SSE-NEXT:    mulss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
@@ -175,14 +175,14 @@ define float @reassociate_muls1(float %x
 
 define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_adds_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    addsd %xmm3, %xmm2
 ; SSE-NEXT:    addsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
@@ -197,14 +197,14 @@ define double @reassociate_adds_double(d
 
 define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_muls_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    mulsd %xmm3, %xmm2
 ; SSE-NEXT:    mulsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
@@ -219,14 +219,14 @@ define double @reassociate_muls_double(d
 
 define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_adds_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulps %xmm1, %xmm0
 ; SSE-NEXT:    addps %xmm3, %xmm2
 ; SSE-NEXT:    addps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
@@ -241,14 +241,14 @@ define <4 x float> @reassociate_adds_v4f
 
 define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_adds_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulpd %xmm1, %xmm0
 ; SSE-NEXT:    addpd %xmm3, %xmm2
 ; SSE-NEXT:    addpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
@@ -263,14 +263,14 @@ define <2 x double> @reassociate_adds_v2
 
 define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_muls_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    mulps %xmm3, %xmm2
 ; SSE-NEXT:    mulps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
@@ -285,14 +285,14 @@ define <4 x float> @reassociate_muls_v4f
 
 define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_muls_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm1, %xmm0
 ; SSE-NEXT:    mulpd %xmm3, %xmm2
 ; SSE-NEXT:    mulpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
@@ -307,7 +307,7 @@ define <2 x double> @reassociate_muls_v2
 
 define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_adds_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vaddps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
@@ -322,7 +322,7 @@ define <8 x float> @reassociate_adds_v8f
 
 define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_adds_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vaddpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -337,7 +337,7 @@ define <4 x double> @reassociate_adds_v4
 
 define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_muls_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmulps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
@@ -352,7 +352,7 @@ define <8 x float> @reassociate_muls_v8f
 
 define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_muls_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmulpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
@@ -367,14 +367,14 @@ define <4 x double> @reassociate_muls_v4
 
 define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_mins_single:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    minss %xmm3, %xmm2
 ; SSE-NEXT:    minss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_single:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
@@ -391,14 +391,14 @@ define float @reassociate_mins_single(fl
 
 define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_maxs_single:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    maxss %xmm3, %xmm2
 ; SSE-NEXT:    maxss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_single:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
@@ -415,14 +415,14 @@ define float @reassociate_maxs_single(fl
 
 define double @reassociate_mins_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_mins_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    minsd %xmm3, %xmm2
 ; SSE-NEXT:    minsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
@@ -439,14 +439,14 @@ define double @reassociate_mins_double(d
 
 define double @reassociate_maxs_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_maxs_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    maxsd %xmm3, %xmm2
 ; SSE-NEXT:    maxsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
@@ -463,14 +463,14 @@ define double @reassociate_maxs_double(d
 
 define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_mins_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    minps %xmm3, %xmm2
 ; SSE-NEXT:    minps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
@@ -487,14 +487,14 @@ define <4 x float> @reassociate_mins_v4f
 
 define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_maxs_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    maxps %xmm3, %xmm2
 ; SSE-NEXT:    maxps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
@@ -511,14 +511,14 @@ define <4 x float> @reassociate_maxs_v4f
 
 define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_mins_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm1, %xmm0
 ; SSE-NEXT:    minpd %xmm3, %xmm2
 ; SSE-NEXT:    minpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
@@ -535,14 +535,14 @@ define <2 x double> @reassociate_mins_v2
 
 define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_maxs_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm1, %xmm0
 ; SSE-NEXT:    maxpd %xmm3, %xmm2
 ; SSE-NEXT:    maxpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
@@ -559,7 +559,7 @@ define <2 x double> @reassociate_maxs_v2
 
 define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_mins_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vminps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x float> @reassociate_mins_v8f
 
 define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_maxs_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmaxps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
@@ -593,7 +593,7 @@ define <8 x float> @reassociate_maxs_v8f
 
 define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_mins_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vminpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
@@ -610,7 +610,7 @@ define <4 x double> @reassociate_mins_v4
 
 define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_maxs_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmaxpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/machine-cp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-cp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-cp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-cp.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; rdar://10640363
 define i32 @t1(i32 %a, i32 %b) nounwind  {
 ; CHECK-LABEL: t1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl %esi, %edx
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    testl %edx, %edx
@@ -19,7 +19,7 @@ define i32 @t1(i32 %a, i32 %b) nounwind
 ; CHECK-NEXT:    testl %edx, %edx
 ; CHECK-NEXT:    movl %ecx, %eax
 ; CHECK-NEXT:    jne LBB0_2
-; CHECK-NEXT:  ## BB#3: ## %while.end
+; CHECK-NEXT:  ## %bb.3: ## %while.end
 ; CHECK-NEXT:    movl %ecx, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  LBB0_1:
@@ -44,7 +44,7 @@ while.end:
 ; rdar://10428165
 define <8 x i16> @t2(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
 ; CHECK-LABEL: t2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; CHECK-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -56,7 +56,7 @@ entry:
 
 define i32 @t3(i64 %a, i64 %b) nounwind  {
 ; CHECK-LABEL: t3:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movq %rsi, %rdx
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    testq %rdx, %rdx
@@ -70,7 +70,7 @@ define i32 @t3(i64 %a, i64 %b) nounwind
 ; CHECK-NEXT:    testq %rdx, %rdx
 ; CHECK-NEXT:    movq %rcx, %rax
 ; CHECK-NEXT:    jne LBB2_2
-; CHECK-NEXT:  ## BB#3: ## %while.end
+; CHECK-NEXT:  ## %bb.3: ## %while.end
 ; CHECK-NEXT:    movl %ecx, %eax
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  LBB2_1:
@@ -98,7 +98,7 @@ while.end:
 ; ... = op2 dst <-- this is used here.
 define <16 x float> @foo(<16 x float> %x) {
 ; CHECK-LABEL: foo:
-; CHECK:       ## BB#0: ## %bb
+; CHECK:       ## %bb.0: ## %bb
 ; CHECK-NEXT:    movaps %xmm3, %xmm8
 ; CHECK-NEXT:    xorps %xmm3, %xmm3
 ; CHECK-NEXT:    pxor %xmm6, %xmm6

Modified: llvm/trunk/test/CodeGen/X86/machine-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-cse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-cse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-cse.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define fastcc i8* @t(i32 %base) nounwind {
 ; CHECK-LABEL: t:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    shlq $9, %rax
@@ -17,7 +17,7 @@ define fastcc i8* @t(i32 %base) nounwind
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB0_2
-; CHECK-NEXT:  # BB#1: # %bb1
+; CHECK-NEXT:  # %bb.1: # %bb1
 ; CHECK-NEXT:    callq bar
 ; CHECK-NEXT:  .LBB0_2: # %bb2
 ; CHECK-NEXT:    callq foo
@@ -49,17 +49,17 @@ declare void @printf(...) nounwind
 
 define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
 ; CHECK-LABEL: commute:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; CHECK-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; CHECK-NEXT:    leal -1(%rdi), %eax
 ; CHECK-NEXT:    cmpl $2, %eax
 ; CHECK-NEXT:    ja .LBB1_4
-; CHECK-NEXT:  # BB#1: # %sw.bb
+; CHECK-NEXT:  # %bb.1: # %sw.bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB1_4
-; CHECK-NEXT:  # BB#2: # %if.end34
+; CHECK-NEXT:  # %bb.2: # %if.end34
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    imull %edi, %esi
 ; CHECK-NEXT:    leal (%rsi,%rsi,2), %esi
@@ -107,11 +107,11 @@ sw.bb307:
 ; rdar://10660865
 define i32 @cross_mbb_phys_cse(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: cross_mbb_phys_cse:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    cmpl %esi, %edi
 ; CHECK-NEXT:    ja .LBB2_2
-; CHECK-NEXT:  # BB#1: # %if.end
+; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:  .LBB2_2: # %return
 ; CHECK-NEXT:    retq
@@ -132,17 +132,17 @@ return:
 ; rdar://11393714
 define i8* @bsd_memchr(i8* %s, i32 %a, i32 %c, i64 %n) nounwind ssp {
 ; CHECK-LABEL: bsd_memchr:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testq %rcx, %rcx
 ; CHECK-NEXT:    je .LBB3_4
-; CHECK-NEXT:  # BB#1: # %preheader
+; CHECK-NEXT:  # %bb.1: # %preheader
 ; CHECK-NEXT:    movzbl %dl, %eax
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB3_2: # %do.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpl %eax, %esi
 ; CHECK-NEXT:    je .LBB3_5
-; CHECK-NEXT:  # BB#3: # %do.cond
+; CHECK-NEXT:  # %bb.3: # %do.cond
 ; CHECK-NEXT:    # in Loop: Header=BB3_2 Depth=1
 ; CHECK-NEXT:    incq %rdi
 ; CHECK-NEXT:    decq %rcx
@@ -184,13 +184,13 @@ declare i1 @t2_func()
 
 define i32 @t2() nounwind {
 ; CHECK-LABEL: t2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    movl $42, {{.*}}(%rip)
 ; CHECK-NEXT:    callq t2_func
 ; CHECK-NEXT:    testb $1, %al
 ; CHECK-NEXT:    je .LBB4_2
-; CHECK-NEXT:  # BB#1: # %a
+; CHECK-NEXT:  # %bb.1: # %a
 ; CHECK-NEXT:    movl {{.*}}(%rip), %eax
 ; CHECK-NEXT:    popq %rcx
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/machine-region-info.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-region-info.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-region-info.mir (original)
+++ llvm/trunk/test/CodeGen/X86/machine-region-info.mir Mon Dec  4 09:18:51 2017
@@ -53,12 +53,12 @@ body:             |
 ...
 
 # CHECK: Region tree:
-# CHECK-NEXT: [0] BB#0 => <Function Return>
-# CHECK-NEXT:   [1] BB#0 => BB#11
-# CHECK-NEXT:     [2] BB#7 => BB#9
-# CHECK-NEXT:     [2] BB#9 => BB#11
-# CHECK-NEXT:     [2] BB#1 => BB#11
-# CHECK-NEXT:       [3] BB#2 => BB#5
-# CHECK-NEXT:         [4] BB#3 => BB#5
-# CHECK-NEXT:       [3] BB#5 => BB#11
+# CHECK-NEXT: [0] %bb.0 => <Function Return>
+# CHECK-NEXT:   [1] %bb.0 => %bb.11
+# CHECK-NEXT:     [2] %bb.7 => %bb.9
+# CHECK-NEXT:     [2] %bb.9 => %bb.11
+# CHECK-NEXT:     [2] %bb.1 => %bb.11
+# CHECK-NEXT:       [3] %bb.2 => %bb.5
+# CHECK-NEXT:         [4] %bb.3 => %bb.5
+# CHECK-NEXT:       [3] %bb.5 => %bb.11
 # CHECK-NEXT: End region tree

Modified: llvm/trunk/test/CodeGen/X86/madd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/madd.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/madd.ll (original)
+++ llvm/trunk/test/CodeGen/X86/madd.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z10test_shortPsS_i:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
@@ -21,7 +21,7 @@ define i32 @_Z10test_shortPsS_i(i16* noc
 ; SSE2-NEXT:    addq $8, %rcx
 ; SSE2-NEXT:    cmpq %rcx, %rax
 ; SSE2-NEXT:    jne .LBB0_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    paddd %xmm0, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
@@ -31,7 +31,7 @@ define i32 @_Z10test_shortPsS_i(i16* noc
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: _Z10test_shortPsS_i:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    movl %edx, %eax
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %ecx, %ecx
@@ -44,7 +44,7 @@ define i32 @_Z10test_shortPsS_i(i16* noc
 ; AVX2-NEXT:    addq $8, %rcx
 ; AVX2-NEXT:    cmpq %rcx, %rax
 ; AVX2-NEXT:    jne .LBB0_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -55,7 +55,7 @@ define i32 @_Z10test_shortPsS_i(i16* noc
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: _Z10test_shortPsS_i:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
@@ -68,7 +68,7 @@ define i32 @_Z10test_shortPsS_i(i16* noc
 ; AVX512-NEXT:    addq $8, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB0_1
-; AVX512-NEXT:  # BB#2: # %middle.block
+; AVX512-NEXT:  # %bb.2: # %middle.block
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -111,7 +111,7 @@ middle.block:
 
 define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: test_unsigned_short:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
@@ -132,7 +132,7 @@ define i32 @test_unsigned_short(i16* noc
 ; SSE2-NEXT:    addq $8, %rcx
 ; SSE2-NEXT:    cmpq %rcx, %rax
 ; SSE2-NEXT:    jne .LBB1_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    paddd %xmm1, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    paddd %xmm0, %xmm1
@@ -142,7 +142,7 @@ define i32 @test_unsigned_short(i16* noc
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: test_unsigned_short:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    movl %edx, %eax
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %ecx, %ecx
@@ -156,7 +156,7 @@ define i32 @test_unsigned_short(i16* noc
 ; AVX2-NEXT:    addq $8, %rcx
 ; AVX2-NEXT:    cmpq %rcx, %rax
 ; AVX2-NEXT:    jne .LBB1_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -167,7 +167,7 @@ define i32 @test_unsigned_short(i16* noc
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_unsigned_short:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
@@ -181,7 +181,7 @@ define i32 @test_unsigned_short(i16* noc
 ; AVX512-NEXT:    addq $8, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB1_1
-; AVX512-NEXT:  # BB#2: # %middle.block
+; AVX512-NEXT:  # %bb.2: # %middle.block
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -224,7 +224,7 @@ middle.block:
 
 define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z9test_charPcS_i:
-; SSE2:       # BB#0: # %entry
+; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
@@ -263,7 +263,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapt
 ; SSE2-NEXT:    addq $16, %rcx
 ; SSE2-NEXT:    cmpq %rcx, %rax
 ; SSE2-NEXT:    jne .LBB2_1
-; SSE2-NEXT:  # BB#2: # %middle.block
+; SSE2-NEXT:  # %bb.2: # %middle.block
 ; SSE2-NEXT:    paddd %xmm3, %xmm0
 ; SSE2-NEXT:    paddd %xmm2, %xmm1
 ; SSE2-NEXT:    paddd %xmm0, %xmm1
@@ -275,7 +275,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapt
 ; SSE2-NEXT:    retq
 ;
 ; AVX2-LABEL: _Z9test_charPcS_i:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    movl %edx, %eax
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %ecx, %ecx
@@ -290,7 +290,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapt
 ; AVX2-NEXT:    addq $16, %rcx
 ; AVX2-NEXT:    cmpq %rcx, %rax
 ; AVX2-NEXT:    jne .LBB2_1
-; AVX2-NEXT:  # BB#2: # %middle.block
+; AVX2-NEXT:  # %bb.2: # %middle.block
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
@@ -302,7 +302,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapt
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: _Z9test_charPcS_i:
-; AVX512:       # BB#0: # %entry
+; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
@@ -316,7 +316,7 @@ define i32 @_Z9test_charPcS_i(i8* nocapt
 ; AVX512-NEXT:    addq $16, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB2_1
-; AVX512-NEXT:  # BB#2: # %middle.block
+; AVX512-NEXT:  # %bb.2: # %middle.block
 ; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/mask-negated-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mask-negated-bool.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mask-negated-bool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mask-negated-bool.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @mask_negated_zext_bool1(i1 %x) {
 ; CHECK-LABEL: mask_negated_zext_bool1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -15,7 +15,7 @@ define i32 @mask_negated_zext_bool1(i1 %
 
 define i32 @mask_negated_zext_bool2(i1 zeroext %x) {
 ; CHECK-LABEL: mask_negated_zext_bool2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %ext = zext i1 %x to i32
@@ -26,7 +26,7 @@ define i32 @mask_negated_zext_bool2(i1 z
 
 define <4 x i32> @mask_negated_zext_bool_vec(<4 x i1> %x) {
 ; CHECK-LABEL: mask_negated_zext_bool_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %ext = zext <4 x i1> %x to <4 x i32>
@@ -37,7 +37,7 @@ define <4 x i32> @mask_negated_zext_bool
 
 define i32 @mask_negated_sext_bool1(i1 %x) {
 ; CHECK-LABEL: mask_negated_sext_bool1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $1, %edi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
@@ -49,7 +49,7 @@ define i32 @mask_negated_sext_bool1(i1 %
 
 define i32 @mask_negated_sext_bool2(i1 zeroext %x) {
 ; CHECK-LABEL: mask_negated_sext_bool2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %ext = sext i1 %x to i32
@@ -60,7 +60,7 @@ define i32 @mask_negated_sext_bool2(i1 z
 
 define <4 x i32> @mask_negated_sext_bool_vec(<4 x i1> %x) {
 ; CHECK-LABEL: mask_negated_sext_bool_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %ext = sext <4 x i1> %x to <4 x i32>

Modified: llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll Mon Dec  4 09:18:51 2017
@@ -18,14 +18,14 @@
 
 define <16 x float> @test1(float* %base, <16 x i32> %ind) {
 ; KNL_64-LABEL: test1:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; KNL_64-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test1:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -33,14 +33,14 @@ define <16 x float> @test1(float* %base,
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test1:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test1:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -75,14 +75,14 @@ declare <8 x i32> @llvm.masked.gather.v8
 
 define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
 ; KNL_64-LABEL: test2:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %esi, %k1
 ; KNL_64-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; KNL_64-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test2:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; KNL_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -90,14 +90,14 @@ define <16 x float> @test2(float* %base,
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test2:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw %esi, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test2:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; SKX_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -116,14 +116,14 @@ define <16 x float> @test2(float* %base,
 
 define <16 x i32> @test3(i32* %base, <16 x i32> %ind, i16 %mask) {
 ; KNL_64-LABEL: test3:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %esi, %k1
 ; KNL_64-NEXT:    vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
 ; KNL_64-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test3:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; KNL_32-NEXT:    vpgatherdd (%eax,%zmm0,4), %zmm1 {%k1}
@@ -131,14 +131,14 @@ define <16 x i32> @test3(i32* %base, <16
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test3:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw %esi, %k1
 ; SKX-NEXT:    vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
 ; SKX-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test3:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; SKX_32-NEXT:    vpgatherdd (%eax,%zmm0,4), %zmm1 {%k1}
@@ -158,7 +158,7 @@ define <16 x i32> @test3(i32* %base, <16
 
 define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) {
 ; KNL_64-LABEL: test4:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %esi, %k1
 ; KNL_64-NEXT:    kmovw %k1, %k2
 ; KNL_64-NEXT:    vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k2}
@@ -168,7 +168,7 @@ define <16 x i32> @test4(i32* %base, <16
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test4:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; KNL_32-NEXT:    kmovw %k1, %k2
@@ -179,7 +179,7 @@ define <16 x i32> @test4(i32* %base, <16
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test4:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw %esi, %k1
 ; SKX-NEXT:    kmovw %k1, %k2
 ; SKX-NEXT:    vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k2}
@@ -189,7 +189,7 @@ define <16 x i32> @test4(i32* %base, <16
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test4:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; SKX_32-NEXT:    kmovw %k1, %k2
@@ -227,7 +227,7 @@ define <16 x i32> @test4(i32* %base, <16
 
 define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
 ; KNL_64-LABEL: test5:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %esi, %k1
 ; KNL_64-NEXT:    kmovw %k1, %k2
 ; KNL_64-NEXT:    vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k2}
@@ -236,7 +236,7 @@ define void @test5(i32* %base, <16 x i32
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test5:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; KNL_32-NEXT:    kmovw %k1, %k2
@@ -246,7 +246,7 @@ define void @test5(i32* %base, <16 x i32
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test5:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw %esi, %k1
 ; SKX-NEXT:    kmovw %k1, %k2
 ; SKX-NEXT:    vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k2}
@@ -255,7 +255,7 @@ define void @test5(i32* %base, <16 x i32
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test5:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; SKX_32-NEXT:    kmovw %k1, %k2
@@ -289,7 +289,7 @@ declare void @llvm.masked.scatter.v16i32
 
 define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
 ; KNL_64-LABEL: test6:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k2
 ; KNL_64-NEXT:    vpgatherqd (,%zmm1), %ymm2 {%k2}
@@ -298,7 +298,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test6:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpmovsxdq %ymm1, %zmm2
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k2
@@ -308,7 +308,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test6:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    kxnorw %k0, %k0, %k2
 ; SKX-NEXT:    vpgatherqd (,%zmm1), %ymm2 {%k2}
@@ -317,7 +317,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test6:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k2
 ; SKX_32-NEXT:    vpgatherdd (,%ymm1), %ymm2 {%k2}
@@ -334,7 +334,7 @@ define <8 x i32> @test6(<8 x i32>%a1, <8
 define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
 ;
 ; KNL_64-LABEL: test7:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %esi, %k1
 ; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm0
 ; KNL_64-NEXT:    kmovw %k1, %k2
@@ -345,7 +345,7 @@ define <8 x i32> @test7(i32* %base, <8 x
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test7:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; KNL_32-NEXT:    kmovw %ecx, %k1
@@ -358,7 +358,7 @@ define <8 x i32> @test7(i32* %base, <8 x
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test7:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw %esi, %k1
 ; SKX-NEXT:    kmovw %k1, %k2
 ; SKX-NEXT:    vpgatherdd (%rdi,%ymm0,4), %ymm1 {%k2}
@@ -368,7 +368,7 @@ define <8 x i32> @test7(i32* %base, <8 x
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test7:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kmovb {{[0-9]+}}(%esp), %k1
 ; SKX_32-NEXT:    kmovw %k1, %k2
@@ -393,7 +393,7 @@ define <8 x i32> @test7(i32* %base, <8 x
 ; each gather call will be split into two
 define <16 x i32> @test8(<16 x i32*> %ptr.random, <16 x i32> %ind, i16 %mask) {
 ; KNL_64-LABEL: test8:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %edi, %k1
 ; KNL_64-NEXT:    kshiftrw $8, %k1, %k2
 ; KNL_64-NEXT:    kmovw %k2, %k3
@@ -408,7 +408,7 @@ define <16 x i32> @test8(<16 x i32*> %pt
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test8:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; KNL_32-NEXT:    kmovw %k1, %k2
 ; KNL_32-NEXT:    vpgatherdd (,%zmm0), %zmm1 {%k2}
@@ -418,7 +418,7 @@ define <16 x i32> @test8(<16 x i32*> %pt
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kmovw %edi, %k1
 ; SKX-NEXT:    kshiftrw $8, %k1, %k2
 ; SKX-NEXT:    kmovw %k2, %k3
@@ -433,7 +433,7 @@ define <16 x i32> @test8(<16 x i32*> %pt
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test8:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; SKX_32-NEXT:    kmovw %k1, %k2
 ; SKX_32-NEXT:    vpgatherdd (,%zmm0), %zmm1 {%k2}
@@ -458,7 +458,7 @@ define <16 x i32> @test8(<16 x i32*> %pt
 
 define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
 ; KNL_64-LABEL: test9:
-; KNL_64:       # BB#0: # %entry
+; KNL_64:       # %bb.0: # %entry
 ; KNL_64-NEXT:    vpbroadcastq %rdi, %zmm2
 ; KNL_64-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [824,824,824,824,824,824,824,824]
 ; KNL_64-NEXT:    vpmuludq %zmm3, %zmm0, %zmm4
@@ -476,7 +476,7 @@ define <8 x i32> @test9(%struct.ST* %bas
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test9:
-; KNL_32:       # BB#0: # %entry
+; KNL_32:       # %bb.0: # %entry
 ; KNL_32-NEXT:    vpbroadcastd {{[0-9]+}}(%esp), %ymm2
 ; KNL_32-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [80,80,80,80,80,80,80,80]
 ; KNL_32-NEXT:    vpmulld %ymm3, %ymm1, %ymm1
@@ -493,7 +493,7 @@ define <8 x i32> @test9(%struct.ST* %bas
 ; KNL_32-NEXT:    retl
 ;
 ; SKX_SMALL-LABEL: test9:
-; SKX_SMALL:       # BB#0: # %entry
+; SKX_SMALL:       # %bb.0: # %entry
 ; SKX_SMALL-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_SMALL-NEXT:    vpmullq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; SKX_SMALL-NEXT:    vpmovsxdq %ymm1, %zmm1
@@ -506,7 +506,7 @@ define <8 x i32> @test9(%struct.ST* %bas
 ; SKX_SMALL-NEXT:    retq
 ;
 ; SKX_LARGE-LABEL: test9:
-; SKX_LARGE:       # BB#0: # %entry
+; SKX_LARGE:       # %bb.0: # %entry
 ; SKX_LARGE-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_LARGE-NEXT:    vpmovsxdq %ymm1, %zmm1
 ; SKX_LARGE-NEXT:    movabsq ${{\.LCPI.*}}, %rax
@@ -522,7 +522,7 @@ define <8 x i32> @test9(%struct.ST* %bas
 ; SKX_LARGE-NEXT:    retq
 ;
 ; SKX_32-LABEL: test9:
-; SKX_32:       # BB#0: # %entry
+; SKX_32:       # %bb.0: # %entry
 ; SKX_32-NEXT:    vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1
 ; SKX_32-NEXT:    vpmovqd %zmm0, %ymm0
 ; SKX_32-NEXT:    vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
@@ -543,7 +543,7 @@ entry:
 
 define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
 ; KNL_64-LABEL: test10:
-; KNL_64:       # BB#0: # %entry
+; KNL_64:       # %bb.0: # %entry
 ; KNL_64-NEXT:    vpbroadcastq %rdi, %zmm2
 ; KNL_64-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [824,824,824,824,824,824,824,824]
 ; KNL_64-NEXT:    vpmuludq %zmm3, %zmm0, %zmm4
@@ -561,7 +561,7 @@ define <8 x i32> @test10(%struct.ST* %ba
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test10:
-; KNL_32:       # BB#0: # %entry
+; KNL_32:       # %bb.0: # %entry
 ; KNL_32-NEXT:    vpbroadcastd {{[0-9]+}}(%esp), %ymm2
 ; KNL_32-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [80,80,80,80,80,80,80,80]
 ; KNL_32-NEXT:    vpmulld %ymm3, %ymm1, %ymm1
@@ -578,7 +578,7 @@ define <8 x i32> @test10(%struct.ST* %ba
 ; KNL_32-NEXT:    retl
 ;
 ; SKX_SMALL-LABEL: test10:
-; SKX_SMALL:       # BB#0: # %entry
+; SKX_SMALL:       # %bb.0: # %entry
 ; SKX_SMALL-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_SMALL-NEXT:    vpmullq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; SKX_SMALL-NEXT:    vpmovsxdq %ymm1, %zmm1
@@ -591,7 +591,7 @@ define <8 x i32> @test10(%struct.ST* %ba
 ; SKX_SMALL-NEXT:    retq
 ;
 ; SKX_LARGE-LABEL: test10:
-; SKX_LARGE:       # BB#0: # %entry
+; SKX_LARGE:       # %bb.0: # %entry
 ; SKX_LARGE-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_LARGE-NEXT:    vpmovsxdq %ymm1, %zmm1
 ; SKX_LARGE-NEXT:    movabsq ${{\.LCPI.*}}, %rax
@@ -607,7 +607,7 @@ define <8 x i32> @test10(%struct.ST* %ba
 ; SKX_LARGE-NEXT:    retq
 ;
 ; SKX_32-LABEL: test10:
-; SKX_32:       # BB#0: # %entry
+; SKX_32:       # %bb.0: # %entry
 ; SKX_32-NEXT:    vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1
 ; SKX_32-NEXT:    vpmovqd %zmm0, %ymm0
 ; SKX_32-NEXT:    vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
@@ -629,14 +629,14 @@ entry:
 ; Splat index in GEP, requires broadcast
 define <16 x float> @test11(float* %base, i32 %ind) {
 ; KNL_64-LABEL: test11:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpbroadcastd %esi, %zmm1
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test11:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %zmm1
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
@@ -644,14 +644,14 @@ define <16 x float> @test11(float* %base
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test11:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpbroadcastd %esi, %zmm1
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test11:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %zmm1
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
@@ -670,14 +670,14 @@ define <16 x float> @test11(float* %base
 ; We are checking the uniform base here. It is taken directly from input to vgatherdps
 define <16 x float> @test12(float* %base, <16 x i32> %ind) {
 ; KNL_64-LABEL: test12:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; KNL_64-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test12:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -685,14 +685,14 @@ define <16 x float> @test12(float* %base
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test12:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test12:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -709,14 +709,14 @@ define <16 x float> @test12(float* %base
 ; The same as the previous, but the mask is undefined
 define <16 x float> @test13(float* %base, <16 x i32> %ind) {
 ; KNL_64-LABEL: test13:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; KNL_64-NEXT:    vmovaps %zmm1, %zmm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test13:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -724,14 +724,14 @@ define <16 x float> @test13(float* %base
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test13:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
 ; SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test13:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_32-NEXT:    vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
@@ -748,7 +748,7 @@ define <16 x float> @test13(float* %base
 ; The base pointer is not splat, can't find uniform base
 define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
 ; KNL_64-LABEL: test14:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
 ; KNL_64-NEXT:    vpbroadcastq %xmm0, %zmm0
 ; KNL_64-NEXT:    vmovd %esi, %xmm1
@@ -762,7 +762,7 @@ define <16 x float> @test14(float* %base
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test14:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; KNL_32-NEXT:    vpbroadcastd %xmm0, %zmm0
 ; KNL_32-NEXT:    vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
@@ -772,7 +772,7 @@ define <16 x float> @test14(float* %base
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test14:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
 ; SKX-NEXT:    vpbroadcastq %xmm0, %zmm0
 ; SKX-NEXT:    vpbroadcastd %esi, %ymm1
@@ -785,7 +785,7 @@ define <16 x float> @test14(float* %base
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test14:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; SKX_32-NEXT:    vpbroadcastd %xmm0, %zmm0
 ; SKX_32-NEXT:    vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
@@ -810,7 +810,7 @@ declare <2 x double> @llvm.masked.gather
 ; Gather smaller than existing instruction
 define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
 ; KNL_64-LABEL: test15:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_64-NEXT:    vmovdqa %xmm1, %xmm1
 ; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm2
@@ -822,7 +822,7 @@ define <4 x float> @test15(float* %base,
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test15:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_32-NEXT:    vmovdqa %xmm1, %xmm1
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -835,7 +835,7 @@ define <4 x float> @test15(float* %base,
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test15:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%xmm0,4), %xmm1 {%k1}
@@ -843,7 +843,7 @@ define <4 x float> @test15(float* %base,
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test15:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -860,7 +860,7 @@ define <4 x float> @test15(float* %base,
 ; Gather smaller than existing instruction
 define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x double> %src0) {
 ; KNL_64-LABEL: test16:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
 ; KNL_64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_64-NEXT:    vpslld $31, %xmm1, %xmm1
@@ -875,7 +875,7 @@ define <4 x double> @test16(double* %bas
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test16:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
 ; KNL_32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_32-NEXT:    vpslld $31, %xmm1, %xmm1
@@ -891,7 +891,7 @@ define <4 x double> @test16(double* %bas
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vgatherdpd (%rdi,%xmm0,8), %ymm2 {%k1}
@@ -899,7 +899,7 @@ define <4 x double> @test16(double* %bas
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test16:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -915,7 +915,7 @@ define <4 x double> @test16(double* %bas
 
 define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x double> %src0) {
 ; KNL_64-LABEL: test17:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
 ; KNL_64-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; KNL_64-NEXT:    vpsraq $32, %zmm0, %zmm0
@@ -928,7 +928,7 @@ define <2 x double> @test17(double* %bas
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test17:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
 ; KNL_32-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; KNL_32-NEXT:    vpsraq $32, %zmm0, %zmm0
@@ -942,7 +942,7 @@ define <2 x double> @test17(double* %bas
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test17:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; SKX-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
@@ -952,7 +952,7 @@ define <2 x double> @test17(double* %bas
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test17:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; SKX_32-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; SKX_32-NEXT:    vpsllq $63, %xmm1, %xmm1
@@ -976,7 +976,7 @@ declare void @llvm.masked.scatter.v2f32.
 
 define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
 ; KNL_64-LABEL: test18:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; KNL_64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_64-NEXT:    vmovdqa %xmm2, %xmm2
@@ -987,7 +987,7 @@ define void @test18(<4 x i32>%a1, <4 x i
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test18:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; KNL_32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_32-NEXT:    vmovdqa %xmm2, %xmm2
@@ -999,7 +999,7 @@ define void @test18(<4 x i32>%a1, <4 x i
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test18:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm2, %xmm2
 ; SKX-NEXT:    vptestmd %xmm2, %xmm2, %k1
 ; SKX-NEXT:    vpscatterqd %xmm0, (,%ymm1) {%k1}
@@ -1007,7 +1007,7 @@ define void @test18(<4 x i32>%a1, <4 x i
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test18:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpslld $31, %xmm2, %xmm2
 ; SKX_32-NEXT:    vptestmd %xmm2, %xmm2, %k1
 ; SKX_32-NEXT:    vpscatterdd %xmm0, (,%xmm1) {%k1}
@@ -1018,7 +1018,7 @@ define void @test18(<4 x i32>%a1, <4 x i
 
 define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind) {
 ; KNL_64-LABEL: test19:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
 ; KNL_64-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL_64-NEXT:    vpslld $31, %xmm1, %xmm1
@@ -1032,7 +1032,7 @@ define void @test19(<4 x double>%a1, dou
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test19:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
 ; KNL_32-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL_32-NEXT:    vpslld $31, %xmm1, %xmm1
@@ -1047,7 +1047,7 @@ define void @test19(<4 x double>%a1, dou
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test19:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vscatterqpd %ymm0, (%rdi,%ymm2,8) {%k1}
@@ -1055,7 +1055,7 @@ define void @test19(<4 x double>%a1, dou
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test19:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1070,7 +1070,7 @@ define void @test19(<4 x double>%a1, dou
 ; Data type requires widening
 define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
 ; KNL_64-LABEL: test20:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; KNL_64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_64-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
@@ -1082,7 +1082,7 @@ define void @test20(<2 x float>%a1, <2 x
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test20:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; KNL_32-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
@@ -1095,7 +1095,7 @@ define void @test20(<2 x float>%a1, <2 x
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test20:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; SKX-NEXT:    vpsllq $63, %xmm2, %xmm2
 ; SKX-NEXT:    vptestmq %xmm2, %xmm2, %k1
@@ -1104,7 +1104,7 @@ define void @test20(<2 x float>%a1, <2 x
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test20:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SKX_32-NEXT:    vpsllq $63, %xmm2, %xmm2
 ; SKX_32-NEXT:    vptestmq %xmm2, %xmm2, %k1
@@ -1117,7 +1117,7 @@ define void @test20(<2 x float>%a1, <2 x
 ; Data type requires promotion
 define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
 ; KNL_64-LABEL: test21:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; KNL_64-NEXT:    vmovdqa %xmm2, %xmm2
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1128,7 +1128,7 @@ define void @test21(<2 x i32>%a1, <2 x i
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test21:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpsraq $32, %zmm1, %zmm1
 ; KNL_32-NEXT:    vmovdqa %xmm2, %xmm2
@@ -1140,7 +1140,7 @@ define void @test21(<2 x i32>%a1, <2 x i
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test21:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; SKX-NEXT:    vpsllq $63, %xmm2, %xmm2
 ; SKX-NEXT:    vptestmq %xmm2, %xmm2, %k1
@@ -1150,7 +1150,7 @@ define void @test21(<2 x i32>%a1, <2 x i
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test21:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; SKX_32-NEXT:    vpsraq $32, %xmm1, %xmm1
 ; SKX_32-NEXT:    vpsllq $63, %xmm2, %xmm2
@@ -1168,7 +1168,7 @@ declare <2 x float> @llvm.masked.gather.
 
 define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x float> %src0) {
 ; KNL_64-LABEL: test22:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1182,7 +1182,7 @@ define <2 x float> @test22(float* %base,
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test22:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_32-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1197,7 +1197,7 @@ define <2 x float> @test22(float* %base,
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test22:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmq %xmm1, %xmm1, %k1
@@ -1206,7 +1206,7 @@ define <2 x float> @test22(float* %base,
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test22:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SKX_32-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmq %xmm1, %xmm1, %k1
@@ -1222,7 +1222,7 @@ define <2 x float> @test22(float* %base,
 
 define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x float> %src0) {
 ; KNL_64-LABEL: test22a:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
 ; KNL_64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL_64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1235,7 +1235,7 @@ define <2 x float> @test22a(float* %base
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test22a:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
 ; KNL_32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL_32-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1249,7 +1249,7 @@ define <2 x float> @test22a(float* %base
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test22a:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vgatherqps (%rdi,%xmm0,4), %xmm2 {%k1}
@@ -1257,7 +1257,7 @@ define <2 x float> @test22a(float* %base
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test22a:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1274,7 +1274,7 @@ declare <2 x i64> @llvm.masked.gather.v2
 
 define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %src0) {
 ; KNL_64-LABEL: test23:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm0
@@ -1288,7 +1288,7 @@ define <2 x i32> @test23(i32* %base, <2
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test23:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1303,7 +1303,7 @@ define <2 x i32> @test23(i32* %base, <2
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test23:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1313,7 +1313,7 @@ define <2 x i32> @test23(i32* %base, <2
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test23:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1330,7 +1330,7 @@ define <2 x i32> @test23(i32* %base, <2
 
 define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32> %src0) {
 ; KNL_64-LABEL: test23b:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
 ; KNL_64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1343,7 +1343,7 @@ define <2 x i32> @test23b(i32* %base, <2
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test23b:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
@@ -1357,7 +1357,7 @@ define <2 x i32> @test23b(i32* %base, <2
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test23b:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
@@ -1366,7 +1366,7 @@ define <2 x i32> @test23b(i32* %base, <2
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test23b:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $63, %xmm1, %xmm1
 ; SKX_32-NEXT:    vptestmq %xmm1, %xmm1, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1381,7 +1381,7 @@ define <2 x i32> @test23b(i32* %base, <2
 
 define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
 ; KNL_64-LABEL: test24:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm0
 ; KNL_64-NEXT:    movb $3, %al
@@ -1392,7 +1392,7 @@ define <2 x i32> @test24(i32* %base, <2
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test24:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_32-NEXT:    vpmovsxdq %ymm0, %zmm0
@@ -1404,7 +1404,7 @@ define <2 x i32> @test24(i32* %base, <2
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test24:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movb $3, %al
 ; SKX-NEXT:    kmovw %eax, %k1
 ; SKX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1413,7 +1413,7 @@ define <2 x i32> @test24(i32* %base, <2
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test24:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    movb $3, %cl
 ; SKX_32-NEXT:    kmovw %ecx, %k1
@@ -1429,7 +1429,7 @@ define <2 x i32> @test24(i32* %base, <2
 
 define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %src0) {
 ; KNL_64-LABEL: test25:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
 ; KNL_64-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; KNL_64-NEXT:    vpsraq $32, %zmm0, %zmm0
@@ -1442,7 +1442,7 @@ define <2 x i64> @test25(i64* %base, <2
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test25:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
 ; KNL_32-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; KNL_32-NEXT:    vpsraq $32, %zmm0, %zmm0
@@ -1456,7 +1456,7 @@ define <2 x i64> @test25(i64* %base, <2
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test25:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; SKX-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
@@ -1466,7 +1466,7 @@ define <2 x i64> @test25(i64* %base, <2
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test25:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; SKX_32-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; SKX_32-NEXT:    vpsllq $63, %xmm1, %xmm1
@@ -1483,7 +1483,7 @@ define <2 x i64> @test25(i64* %base, <2
 
 define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
 ; KNL_64-LABEL: test26:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; KNL_64-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; KNL_64-NEXT:    vpsraq $32, %zmm0, %zmm0
@@ -1495,7 +1495,7 @@ define <2 x i64> @test26(i64* %base, <2
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test26:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; KNL_32-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; KNL_32-NEXT:    vpsraq $32, %zmm0, %zmm0
@@ -1509,7 +1509,7 @@ define <2 x i64> @test26(i64* %base, <2
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test26:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; SKX-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
@@ -1518,7 +1518,7 @@ define <2 x i64> @test26(i64* %base, <2
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test26:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; SKX_32-NEXT:    vpsraq $32, %xmm0, %xmm0
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1535,7 +1535,7 @@ define <2 x i64> @test26(i64* %base, <2
 ; Result type requires widening; all-ones mask
 define <2 x float> @test27(float* %base, <2 x i32> %ind) {
 ; KNL_64-LABEL: test27:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm1
 ; KNL_64-NEXT:    movb $3, %al
@@ -1546,7 +1546,7 @@ define <2 x float> @test27(float* %base,
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test27:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpmovsxdq %ymm0, %zmm1
@@ -1558,7 +1558,7 @@ define <2 x float> @test27(float* %base,
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test27:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; SKX-NEXT:    movb $3, %al
 ; SKX-NEXT:    kmovw %eax, %k1
@@ -1566,7 +1566,7 @@ define <2 x float> @test27(float* %base,
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test27:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    movb $3, %cl
@@ -1582,7 +1582,7 @@ define <2 x float> @test27(float* %base,
 ; Data type requires promotion, mask is all-ones
 define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
 ; KNL_64-LABEL: test28:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
 ; KNL_64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; KNL_64-NEXT:    movb $3, %al
@@ -1592,7 +1592,7 @@ define void @test28(<2 x i32>%a1, <2 x i
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test28:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; KNL_32-NEXT:    vpsraq $32, %zmm1, %zmm1
 ; KNL_32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1604,7 +1604,7 @@ define void @test28(<2 x i32>%a1, <2 x i
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test28:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; SKX-NEXT:    movb $3, %al
 ; SKX-NEXT:    kmovw %eax, %k1
@@ -1614,7 +1614,7 @@ define void @test28(<2 x i32>%a1, <2 x i
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test28:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; SKX_32-NEXT:    vpsraq $32, %xmm1, %xmm1
 ; SKX_32-NEXT:    movb $3, %al
@@ -1636,7 +1636,7 @@ define void @test28(<2 x i32>%a1, <2 x i
 
 define <16 x float> @test29(float* %base, <16 x i32> %ind) {
 ; KNL_64-LABEL: test29:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    movw $44, %ax
 ; KNL_64-NEXT:    kmovw %eax, %k1
 ; KNL_64-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
@@ -1644,7 +1644,7 @@ define <16 x float> @test29(float* %base
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test29:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    movw $44, %cx
 ; KNL_32-NEXT:    kmovw %ecx, %k1
@@ -1653,7 +1653,7 @@ define <16 x float> @test29(float* %base
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test29:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movw $44, %ax
 ; SKX-NEXT:    kmovw %eax, %k1
 ; SKX-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
@@ -1661,7 +1661,7 @@ define <16 x float> @test29(float* %base
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test29:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    movw $44, %cx
 ; SKX_32-NEXT:    kmovw %ecx, %k1
@@ -1683,7 +1683,7 @@ define <16 x float> @test29(float* %base
 declare <3 x i32> @llvm.masked.gather.v3i32.v3p0i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>)
 define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) {
 ; KNL_64-LABEL: test30:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kmovw %edx, %k0
 ; KNL_64-NEXT:    kmovw %esi, %k2
 ; KNL_64-NEXT:    vpmovsxdq %xmm1, %ymm1
@@ -1692,7 +1692,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_64-NEXT:    testb $1, %dil
 ; KNL_64-NEXT:    # implicit-def: %xmm0
 ; KNL_64-NEXT:    je .LBB31_2
-; KNL_64-NEXT:  # BB#1: # %cond.load
+; KNL_64-NEXT:  # %bb.1: # %cond.load
 ; KNL_64-NEXT:    vmovq %xmm1, %rax
 ; KNL_64-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; KNL_64-NEXT:  .LBB31_2: # %else
@@ -1702,7 +1702,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_64-NEXT:    kmovw %k2, %eax
 ; KNL_64-NEXT:    testb $1, %al
 ; KNL_64-NEXT:    je .LBB31_4
-; KNL_64-NEXT:  # BB#3: # %cond.load1
+; KNL_64-NEXT:  # %bb.3: # %cond.load1
 ; KNL_64-NEXT:    vpextrq $1, %xmm1, %rax
 ; KNL_64-NEXT:    vpinsrd $1, (%rax), %xmm0, %xmm0
 ; KNL_64-NEXT:  .LBB31_4: # %else2
@@ -1711,7 +1711,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_64-NEXT:    kmovw %k0, %eax
 ; KNL_64-NEXT:    testb $1, %al
 ; KNL_64-NEXT:    je .LBB31_6
-; KNL_64-NEXT:  # BB#5: # %cond.load4
+; KNL_64-NEXT:  # %bb.5: # %cond.load4
 ; KNL_64-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; KNL_64-NEXT:    vmovq %xmm1, %rax
 ; KNL_64-NEXT:    vpinsrd $2, (%rax), %xmm0, %xmm0
@@ -1730,7 +1730,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test30:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    kmovw %eax, %k0
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1741,7 +1741,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_32-NEXT:    testb $1, %al
 ; KNL_32-NEXT:    # implicit-def: %xmm0
 ; KNL_32-NEXT:    je .LBB31_2
-; KNL_32-NEXT:  # BB#1: # %cond.load
+; KNL_32-NEXT:  # %bb.1: # %cond.load
 ; KNL_32-NEXT:    vmovd %xmm1, %ecx
 ; KNL_32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; KNL_32-NEXT:  .LBB31_2: # %else
@@ -1751,7 +1751,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_32-NEXT:    kmovw %k2, %eax
 ; KNL_32-NEXT:    testb $1, %al
 ; KNL_32-NEXT:    je .LBB31_4
-; KNL_32-NEXT:  # BB#3: # %cond.load1
+; KNL_32-NEXT:  # %bb.3: # %cond.load1
 ; KNL_32-NEXT:    vpextrd $1, %xmm1, %eax
 ; KNL_32-NEXT:    vpinsrd $1, (%eax), %xmm0, %xmm0
 ; KNL_32-NEXT:  .LBB31_4: # %else2
@@ -1760,7 +1760,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_32-NEXT:    kmovw %k0, %eax
 ; KNL_32-NEXT:    testb $1, %al
 ; KNL_32-NEXT:    je .LBB31_6
-; KNL_32-NEXT:  # BB#5: # %cond.load4
+; KNL_32-NEXT:  # %bb.5: # %cond.load4
 ; KNL_32-NEXT:    vpextrd $2, %xmm1, %eax
 ; KNL_32-NEXT:    vpinsrd $2, (%eax), %xmm0, %xmm0
 ; KNL_32-NEXT:  .LBB31_6: # %else5
@@ -1777,7 +1777,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test30:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm2, %xmm2
 ; SKX-NEXT:    vptestmd %xmm2, %xmm2, %k1
 ; SKX-NEXT:    kshiftlw $15, %k1, %k0
@@ -1789,7 +1789,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    # implicit-def: %xmm0
 ; SKX-NEXT:    je .LBB31_2
-; SKX-NEXT:  # BB#1: # %cond.load
+; SKX-NEXT:  # %bb.1: # %cond.load
 ; SKX-NEXT:    vmovq %xmm1, %rax
 ; SKX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SKX-NEXT:  .LBB31_2: # %else
@@ -1798,7 +1798,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX-NEXT:    kmovw %k0, %eax
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    je .LBB31_4
-; SKX-NEXT:  # BB#3: # %cond.load1
+; SKX-NEXT:  # %bb.3: # %cond.load1
 ; SKX-NEXT:    vpextrq $1, %xmm1, %rax
 ; SKX-NEXT:    vpinsrd $1, (%rax), %xmm0, %xmm0
 ; SKX-NEXT:  .LBB31_4: # %else2
@@ -1807,7 +1807,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX-NEXT:    kmovw %k0, %eax
 ; SKX-NEXT:    testb $1, %al
 ; SKX-NEXT:    je .LBB31_6
-; SKX-NEXT:  # BB#5: # %cond.load4
+; SKX-NEXT:  # %bb.5: # %cond.load4
 ; SKX-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; SKX-NEXT:    vmovq %xmm1, %rax
 ; SKX-NEXT:    vpinsrd $2, (%rax), %xmm0, %xmm0
@@ -1818,7 +1818,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test30:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    subl $12, %esp
 ; SKX_32-NEXT:    .cfi_def_cfa_offset 16
 ; SKX_32-NEXT:    vpslld $31, %xmm2, %xmm2
@@ -1831,7 +1831,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX_32-NEXT:    testb $1, %al
 ; SKX_32-NEXT:    # implicit-def: %xmm1
 ; SKX_32-NEXT:    je .LBB31_2
-; SKX_32-NEXT:  # BB#1: # %cond.load
+; SKX_32-NEXT:  # %bb.1: # %cond.load
 ; SKX_32-NEXT:    vmovd %xmm2, %eax
 ; SKX_32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SKX_32-NEXT:  .LBB31_2: # %else
@@ -1840,7 +1840,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX_32-NEXT:    kmovw %k0, %eax
 ; SKX_32-NEXT:    testb $1, %al
 ; SKX_32-NEXT:    je .LBB31_4
-; SKX_32-NEXT:  # BB#3: # %cond.load1
+; SKX_32-NEXT:  # %bb.3: # %cond.load1
 ; SKX_32-NEXT:    vpextrd $1, %xmm2, %eax
 ; SKX_32-NEXT:    vpinsrd $1, (%eax), %xmm1, %xmm1
 ; SKX_32-NEXT:  .LBB31_4: # %else2
@@ -1850,7 +1850,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 ; SKX_32-NEXT:    kmovw %k0, %eax
 ; SKX_32-NEXT:    testb $1, %al
 ; SKX_32-NEXT:    je .LBB31_6
-; SKX_32-NEXT:  # BB#5: # %cond.load4
+; SKX_32-NEXT:  # %bb.5: # %cond.load4
 ; SKX_32-NEXT:    vpextrd $2, %xmm2, %eax
 ; SKX_32-NEXT:    vpinsrd $2, (%eax), %xmm1, %xmm1
 ; SKX_32-NEXT:  .LBB31_6: # %else5
@@ -1867,7 +1867,7 @@ define <3 x i32> @test30(<3 x i32*> %bas
 declare <16 x float*> @llvm.masked.gather.v16p0f32.v16p0p0f32(<16 x float**>, i32, <16 x i1>, <16 x float*>)
 define <16 x float*> @test31(<16 x float**> %ptrs) {
 ; KNL_64-LABEL: test31:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k2
 ; KNL_64-NEXT:    vpgatherqq (,%zmm0), %zmm2 {%k2}
@@ -1877,14 +1877,14 @@ define <16 x float*> @test31(<16 x float
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test31:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_32-NEXT:    vpgatherdd (,%zmm0), %zmm1 {%k1}
 ; KNL_32-NEXT:    vmovdqa64 %zmm1, %zmm0
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test31:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    kxnorw %k0, %k0, %k2
 ; SKX-NEXT:    vpgatherqq (,%zmm0), %zmm2 {%k2}
@@ -1894,7 +1894,7 @@ define <16 x float*> @test31(<16 x float
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test31:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_32-NEXT:    vpgatherdd (,%zmm0), %zmm1 {%k1}
 ; SKX_32-NEXT:    vmovdqa64 %zmm1, %zmm0
@@ -1906,7 +1906,7 @@ define <16 x float*> @test31(<16 x float
 
 define <16 x i32> @test_gather_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %src0)  {
 ; KNL_64-LABEL: test_gather_16i32:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -1918,7 +1918,7 @@ define <16 x i32> @test_gather_16i32(<16
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_gather_16i32:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; KNL_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -1927,7 +1927,7 @@ define <16 x i32> @test_gather_16i32(<16
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_gather_16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -1939,7 +1939,7 @@ define <16 x i32> @test_gather_16i32(<16
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_gather_16i32:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; SKX_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; SKX_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -1951,7 +1951,7 @@ define <16 x i32> @test_gather_16i32(<16
 }
 define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %src0)  {
 ; KNL_64-LABEL: test_gather_16i64:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -1963,7 +1963,7 @@ define <16 x i64> @test_gather_16i64(<16
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_gather_16i64:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    pushl %ebp
 ; KNL_32-NEXT:    .cfi_def_cfa_offset 8
 ; KNL_32-NEXT:    .cfi_offset %ebp, -8
@@ -1985,7 +1985,7 @@ define <16 x i64> @test_gather_16i64(<16
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_gather_16i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -1997,7 +1997,7 @@ define <16 x i64> @test_gather_16i64(<16
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_gather_16i64:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    pushl %ebp
 ; SKX_32-NEXT:    .cfi_def_cfa_offset 8
 ; SKX_32-NEXT:    .cfi_offset %ebp, -8
@@ -2023,7 +2023,7 @@ define <16 x i64> @test_gather_16i64(<16
 declare <16 x i64> @llvm.masked.gather.v16i64.v16p0i64(<16 x i64*> %ptrs, i32, <16 x i1> %mask, <16 x i64> %src0)
 define <16 x float> @test_gather_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %src0)  {
 ; KNL_64-LABEL: test_gather_16f32:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2035,7 +2035,7 @@ define <16 x float> @test_gather_16f32(<
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_gather_16f32:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; KNL_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -2044,7 +2044,7 @@ define <16 x float> @test_gather_16f32(<
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_gather_16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2056,7 +2056,7 @@ define <16 x float> @test_gather_16f32(<
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_gather_16f32:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; SKX_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; SKX_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -2068,7 +2068,7 @@ define <16 x float> @test_gather_16f32(<
 }
 define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x double> %src0)  {
 ; KNL_64-LABEL: test_gather_16f64:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2080,7 +2080,7 @@ define <16 x double> @test_gather_16f64(
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_gather_16f64:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    pushl %ebp
 ; KNL_32-NEXT:    .cfi_def_cfa_offset 8
 ; KNL_32-NEXT:    .cfi_offset %ebp, -8
@@ -2102,7 +2102,7 @@ define <16 x double> @test_gather_16f64(
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_gather_16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2114,7 +2114,7 @@ define <16 x double> @test_gather_16f64(
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_gather_16f64:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    pushl %ebp
 ; SKX_32-NEXT:    .cfi_def_cfa_offset 8
 ; SKX_32-NEXT:    .cfi_offset %ebp, -8
@@ -2140,7 +2140,7 @@ define <16 x double> @test_gather_16f64(
 declare <16 x double> @llvm.masked.gather.v16f64.v16p0f64(<16 x double*> %ptrs, i32, <16 x i1> %mask, <16 x double> %src0)
 define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %src0)  {
 ; KNL_64-LABEL: test_scatter_16i32:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2152,7 +2152,7 @@ define void @test_scatter_16i32(<16 x i3
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_scatter_16i32:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; KNL_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -2161,7 +2161,7 @@ define void @test_scatter_16i32(<16 x i3
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_scatter_16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2173,7 +2173,7 @@ define void @test_scatter_16i32(<16 x i3
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_scatter_16i32:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; SKX_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; SKX_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -2185,7 +2185,7 @@ define void @test_scatter_16i32(<16 x i3
 }
 define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %src0)  {
 ; KNL_64-LABEL: test_scatter_16i64:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2196,7 +2196,7 @@ define void @test_scatter_16i64(<16 x i6
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_scatter_16i64:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    pushl %ebp
 ; KNL_32-NEXT:    .cfi_def_cfa_offset 8
 ; KNL_32-NEXT:    .cfi_offset %ebp, -8
@@ -2218,7 +2218,7 @@ define void @test_scatter_16i64(<16 x i6
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_scatter_16i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2229,7 +2229,7 @@ define void @test_scatter_16i64(<16 x i6
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_scatter_16i64:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    pushl %ebp
 ; SKX_32-NEXT:    .cfi_def_cfa_offset 8
 ; SKX_32-NEXT:    .cfi_offset %ebp, -8
@@ -2255,7 +2255,7 @@ define void @test_scatter_16i64(<16 x i6
 declare void @llvm.masked.scatter.v16i64.v16p0i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32, <16 x i1> %mask)
 define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %src0)  {
 ; KNL_64-LABEL: test_scatter_16f32:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2267,7 +2267,7 @@ define void @test_scatter_16f32(<16 x fl
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_scatter_16f32:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; KNL_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; KNL_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -2276,7 +2276,7 @@ define void @test_scatter_16f32(<16 x fl
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_scatter_16f32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2288,7 +2288,7 @@ define void @test_scatter_16f32(<16 x fl
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_scatter_16f32:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpmovsxbd %xmm1, %zmm1
 ; SKX_32-NEXT:    vpslld $31, %zmm1, %zmm1
 ; SKX_32-NEXT:    vptestmd %zmm1, %zmm1, %k1
@@ -2301,7 +2301,7 @@ define void @test_scatter_16f32(<16 x fl
 declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> %src0, <16 x float*> %ptrs, i32, <16 x i1> %mask)
 define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x double> %src0)  {
 ; KNL_64-LABEL: test_scatter_16f64:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; KNL_64-NEXT:    vpslld $31, %zmm2, %zmm2
 ; KNL_64-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2312,7 +2312,7 @@ define void @test_scatter_16f64(<16 x do
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_scatter_16f64:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    pushl %ebp
 ; KNL_32-NEXT:    .cfi_def_cfa_offset 8
 ; KNL_32-NEXT:    .cfi_offset %ebp, -8
@@ -2334,7 +2334,7 @@ define void @test_scatter_16f64(<16 x do
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_scatter_16f64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbd %xmm2, %zmm2
 ; SKX-NEXT:    vpslld $31, %zmm2, %zmm2
 ; SKX-NEXT:    vptestmd %zmm2, %zmm2, %k1
@@ -2345,7 +2345,7 @@ define void @test_scatter_16f64(<16 x do
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_scatter_16f64:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    pushl %ebp
 ; SKX_32-NEXT:    .cfi_def_cfa_offset 8
 ; SKX_32-NEXT:    .cfi_offset %ebp, -8
@@ -2372,7 +2372,7 @@ declare void @llvm.masked.scatter.v16f64
 
 define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i64> %d) {
 ; KNL_64-LABEL: test_pr28312:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; KNL_64-NEXT:    vpslld $31, %xmm1, %xmm1
 ; KNL_64-NEXT:    vpsrad $31, %xmm1, %xmm1
@@ -2386,7 +2386,7 @@ define <4 x i64> @test_pr28312(<4 x i64*
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_pr28312:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    pushl %ebp
 ; KNL_32-NEXT:    .cfi_def_cfa_offset 8
 ; KNL_32-NEXT:    .cfi_offset %ebp, -8
@@ -2410,7 +2410,7 @@ define <4 x i64> @test_pr28312(<4 x i64*
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test_pr28312:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
 ; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
 ; SKX-NEXT:    vpgatherqq (,%ymm0), %ymm1 {%k1}
@@ -2419,7 +2419,7 @@ define <4 x i64> @test_pr28312(<4 x i64*
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_pr28312:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    pushl %ebp
 ; SKX_32-NEXT:    .cfi_def_cfa_offset 8
 ; SKX_32-NEXT:    .cfi_offset %ebp, -8
@@ -2446,28 +2446,28 @@ declare <4 x i64> @llvm.masked.gather.v4
 
 define <8 x i32> @test_global_array(<8 x i64> %indxs) {
 ; KNL_64-LABEL: test_global_array:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT:    vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
 ; KNL_64-NEXT:    vmovdqa %ymm1, %ymm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test_global_array:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
 ; KNL_32-NEXT:    vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
 ; KNL_32-NEXT:    vmovdqa %ymm1, %ymm0
 ; KNL_32-NEXT:    retl
 ;
 ; SKX_SMALL-LABEL: test_global_array:
-; SKX_SMALL:       # BB#0:
+; SKX_SMALL:       # %bb.0:
 ; SKX_SMALL-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_SMALL-NEXT:    vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
 ; SKX_SMALL-NEXT:    vmovdqa %ymm1, %ymm0
 ; SKX_SMALL-NEXT:    retq
 ;
 ; SKX_LARGE-LABEL: test_global_array:
-; SKX_LARGE:       # BB#0:
+; SKX_LARGE:       # %bb.0:
 ; SKX_LARGE-NEXT:    movabsq $glob_array, %rax
 ; SKX_LARGE-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_LARGE-NEXT:    vpgatherqd (%rax,%zmm0,4), %ymm1 {%k1}
@@ -2475,7 +2475,7 @@ define <8 x i32> @test_global_array(<8 x
 ; SKX_LARGE-NEXT:    retq
 ;
 ; SKX_32-LABEL: test_global_array:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_32-NEXT:    vpgatherqd glob_array(,%zmm0,4), %ymm1 {%k1}
 ; SKX_32-NEXT:    vmovdqa %ymm1, %ymm0
@@ -2487,20 +2487,20 @@ define <8 x i32> @test_global_array(<8 x
 
 define void @v1_scatter(<1 x i32>%a1, <1 x i32*> %ptr, <1 x i1> %mask) {
 ; KNL_64-LABEL: v1_scatter:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    testb $1, %dl
 ; KNL_64-NEXT:    jne .LBB43_1
-; KNL_64-NEXT:  # BB#2: # %else
+; KNL_64-NEXT:  # %bb.2: # %else
 ; KNL_64-NEXT:    retq
 ; KNL_64-NEXT:  .LBB43_1: # %cond.store
 ; KNL_64-NEXT:    movl %edi, (%rsi)
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: v1_scatter:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; KNL_32-NEXT:    jne .LBB43_1
-; KNL_32-NEXT:  # BB#2: # %else
+; KNL_32-NEXT:  # %bb.2: # %else
 ; KNL_32-NEXT:    retl
 ; KNL_32-NEXT:  .LBB43_1: # %cond.store
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -2509,20 +2509,20 @@ define void @v1_scatter(<1 x i32>%a1, <1
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: v1_scatter:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    testb $1, %dl
 ; SKX-NEXT:    jne .LBB43_1
-; SKX-NEXT:  # BB#2: # %else
+; SKX-NEXT:  # %bb.2: # %else
 ; SKX-NEXT:    retq
 ; SKX-NEXT:  .LBB43_1: # %cond.store
 ; SKX-NEXT:    movl %edi, (%rsi)
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: v1_scatter:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; SKX_32-NEXT:    jne .LBB43_1
-; SKX_32-NEXT:  # BB#2: # %else
+; SKX_32-NEXT:  # %bb.2: # %else
 ; SKX_32-NEXT:    retl
 ; SKX_32-NEXT:  .LBB43_1: # %cond.store
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -2536,23 +2536,23 @@ declare void @llvm.masked.scatter.v1i32.
 
 define <1 x i32> @v1_gather(<1 x i32*> %ptr, <1 x i1> %mask, <1 x i32> %src0) {
 ; KNL_64-LABEL: v1_gather:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    movl (%rdi), %eax
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: v1_gather:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    movl (%eax), %eax
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: v1_gather:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    movl (%rdi), %eax
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: v1_gather:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    movl (%eax), %eax
 ; SKX_32-NEXT:    retl
@@ -2565,7 +2565,7 @@ declare <1 x i32> @llvm.masked.gather.v1
 ; This experienced a bad interaction when we widened and then tried to split.
 define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <2 x float> %src0) {
 ; KNL_64-LABEL: large_index:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; KNL_64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
 ; KNL_64-NEXT:    vmovaps %xmm0, %xmm0
@@ -2580,7 +2580,7 @@ define <2 x float> @large_index(float* %
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: large_index:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; KNL_32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
 ; KNL_32-NEXT:    vmovaps %xmm0, %xmm0
@@ -2597,7 +2597,7 @@ define <2 x float> @large_index(float* %
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: large_index:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; SKX-NEXT:    vptestmq %xmm0, %xmm0, %k1
 ; SKX-NEXT:    vmovq %rcx, %xmm0
@@ -2608,7 +2608,7 @@ define <2 x float> @large_index(float* %
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: large_index:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpsllq $63, %xmm0, %xmm0
 ; SKX_32-NEXT:    vptestmq %xmm0, %xmm0, %k1
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -2627,7 +2627,7 @@ define <2 x float> @large_index(float* %
 ; Make sure we allow index to be sign extended from a smaller than i32 element size.
 define <16 x float> @sext_i8_index(float* %base, <16 x i8> %ind) {
 ; KNL_64-LABEL: sext_i8_index:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; KNL_64-NEXT:    vpmovsxwq %xmm0, %zmm1
 ; KNL_64-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -2640,7 +2640,7 @@ define <16 x float> @sext_i8_index(float
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: sext_i8_index:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; KNL_32-NEXT:    vpmovsxwq %xmm0, %zmm1
@@ -2654,7 +2654,7 @@ define <16 x float> @sext_i8_index(float
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: sext_i8_index:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; SKX-NEXT:    vpmovsxwq %xmm0, %zmm1
 ; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm0
@@ -2667,7 +2667,7 @@ define <16 x float> @sext_i8_index(float
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: sext_i8_index:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; SKX_32-NEXT:    vpmovsxwq %xmm0, %zmm1
@@ -2690,7 +2690,7 @@ define <16 x float> @sext_i8_index(float
 ; Make sure we allow index to be sign extended from a smaller than i32 element size.
 define <8 x float> @sext_v8i8_index(float* %base, <8 x i8> %ind) {
 ; KNL_64-LABEL: sext_v8i8_index:
-; KNL_64:       # BB#0:
+; KNL_64:       # %bb.0:
 ; KNL_64-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; KNL_64-NEXT:    vpsllq $56, %zmm0, %zmm0
 ; KNL_64-NEXT:    vpsraq $56, %zmm0, %zmm1
@@ -2699,7 +2699,7 @@ define <8 x float> @sext_v8i8_index(floa
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: sext_v8i8_index:
-; KNL_32:       # BB#0:
+; KNL_32:       # %bb.0:
 ; KNL_32-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; KNL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_32-NEXT:    vpsllq $56, %zmm0, %zmm0
@@ -2709,7 +2709,7 @@ define <8 x float> @sext_v8i8_index(floa
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: sext_v8i8_index:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; SKX-NEXT:    vpsllq $56, %zmm0, %zmm0
 ; SKX-NEXT:    vpsraq $56, %zmm0, %zmm1
@@ -2718,7 +2718,7 @@ define <8 x float> @sext_v8i8_index(floa
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: sext_v8i8_index:
-; SKX_32:       # BB#0:
+; SKX_32:       # %bb.0:
 ; SKX_32-NEXT:    vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; SKX_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SKX_32-NEXT:    vpsllq $56, %zmm0, %zmm0

Modified: llvm/trunk/test/CodeGen/X86/masked_memop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_memop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_memop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_memop.ll Mon Dec  4 09:18:51 2017
@@ -10,11 +10,11 @@
 
 define <1 x double> @loadv1(<1 x i64> %trigger, <1 x double>* %addr, <1 x double> %dst) {
 ; AVX-LABEL: loadv1:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    testq %rdi, %rdi
 ; AVX-NEXT:    ## implicit-def: %xmm1
 ; AVX-NEXT:    je LBB0_1
-; AVX-NEXT:  ## BB#2: ## %else
+; AVX-NEXT:  ## %bb.2: ## %else
 ; AVX-NEXT:    testq %rdi, %rdi
 ; AVX-NEXT:    jne LBB0_3
 ; AVX-NEXT:  LBB0_4: ## %else
@@ -30,11 +30,11 @@ define <1 x double> @loadv1(<1 x i64> %t
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: loadv1:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    testq %rdi, %rdi
 ; AVX512F-NEXT:    ## implicit-def: %xmm1
 ; AVX512F-NEXT:    jne LBB0_2
-; AVX512F-NEXT:  ## BB#1: ## %cond.load
+; AVX512F-NEXT:  ## %bb.1: ## %cond.load
 ; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:  LBB0_2: ## %else
 ; AVX512F-NEXT:    testq %rdi, %rdi
@@ -44,11 +44,11 @@ define <1 x double> @loadv1(<1 x i64> %t
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: loadv1:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    testq %rdi, %rdi
 ; SKX-NEXT:    ## implicit-def: %xmm1
 ; SKX-NEXT:    jne LBB0_2
-; SKX-NEXT:  ## BB#1: ## %cond.load
+; SKX-NEXT:  ## %bb.1: ## %cond.load
 ; SKX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; SKX-NEXT:  LBB0_2: ## %else
 ; SKX-NEXT:    testq %rdi, %rdi
@@ -64,20 +64,20 @@ declare <1 x double> @llvm.masked.load.v
 
 define void @storev1(<1 x i32> %trigger, <1 x i32>* %addr, <1 x i32> %val) {
 ; AVX-LABEL: storev1:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    testl %edi, %edi
 ; AVX-NEXT:    je LBB1_1
-; AVX-NEXT:  ## BB#2: ## %else
+; AVX-NEXT:  ## %bb.2: ## %else
 ; AVX-NEXT:    retq
 ; AVX-NEXT:  LBB1_1: ## %cond.store
 ; AVX-NEXT:    movl %edx, (%rsi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: storev1:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    testl %edi, %edi
 ; AVX512-NEXT:    je LBB1_1
-; AVX512-NEXT:  ## BB#2: ## %else
+; AVX512-NEXT:  ## %bb.2: ## %else
 ; AVX512-NEXT:    retq
 ; AVX512-NEXT:  LBB1_1: ## %cond.store
 ; AVX512-NEXT:    movl %edx, (%rsi)
@@ -90,7 +90,7 @@ declare void @llvm.masked.store.v1i32.p0
 
 define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
 ; AVX-LABEL: test6:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2
@@ -98,7 +98,7 @@ define <2 x double> @test6(<2 x i64> %tr
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: test6:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2
@@ -106,7 +106,7 @@ define <2 x double> @test6(<2 x i64> %tr
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test6:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vblendmpd (%rdi), %xmm1, %xmm0 {%k1}
@@ -118,7 +118,7 @@ define <2 x double> @test6(<2 x i64> %tr
 
 define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
 ; AVX-LABEL: test7:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2
@@ -126,7 +126,7 @@ define <4 x float> @test7(<4 x i32> %tri
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: test7:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2
@@ -134,7 +134,7 @@ define <4 x float> @test7(<4 x i32> %tri
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test7:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vblendmps (%rdi), %xmm1, %xmm0 {%k1}
@@ -146,7 +146,7 @@ define <4 x float> @test7(<4 x i32> %tri
 
 define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
 ; AVX1-LABEL: test8:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2
@@ -154,7 +154,7 @@ define <4 x i32> @test8(<4 x i32> %trigg
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test8:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmaskmovd (%rdi), %xmm0, %xmm2
@@ -162,7 +162,7 @@ define <4 x i32> @test8(<4 x i32> %trigg
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test8:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmaskmovd (%rdi), %xmm0, %xmm2
@@ -170,7 +170,7 @@ define <4 x i32> @test8(<4 x i32> %trigg
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test8:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vpblendmd (%rdi), %xmm1, %xmm0 {%k1}
@@ -182,28 +182,28 @@ define <4 x i32> @test8(<4 x i32> %trigg
 
 define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
 ; AVX1-LABEL: test9:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test9:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmaskmovd %xmm1, %xmm0, (%rdi)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test9:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmaskmovd %xmm1, %xmm0, (%rdi)
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test9:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vmovdqu32 %xmm1, (%rdi) {%k1}
@@ -215,7 +215,7 @@ define void @test9(<4 x i32> %trigger, <
 
 define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
 ; AVX1-LABEL: test10:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm2
@@ -227,7 +227,7 @@ define <4 x double> @test10(<4 x i32> %t
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test10:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
@@ -236,7 +236,7 @@ define <4 x double> @test10(<4 x i32> %t
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test10:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxdq %xmm0, %ymm0
@@ -245,7 +245,7 @@ define <4 x double> @test10(<4 x i32> %t
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test10:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %xmm2, %xmm0, %k1
 ; SKX-NEXT:    vblendmpd (%rdi), %ymm1, %ymm0 {%k1}
@@ -257,7 +257,7 @@ define <4 x double> @test10(<4 x i32> %t
 
 define <4 x double> @test10b(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
 ; AVX1-LABEL: test10b:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
@@ -268,7 +268,7 @@ define <4 x double> @test10b(<4 x i32> %
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test10b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
@@ -276,7 +276,7 @@ define <4 x double> @test10b(<4 x i32> %
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test10b:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmovsxdq %xmm0, %ymm0
@@ -284,7 +284,7 @@ define <4 x double> @test10b(<4 x i32> %
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test10b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k1
 ; SKX-NEXT:    vmovapd (%rdi), %ymm0 {%k1} {z}
@@ -296,7 +296,7 @@ define <4 x double> @test10b(<4 x i32> %
 
 define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
 ; AVX1-LABEL: test11a:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
@@ -307,7 +307,7 @@ define <8 x float> @test11a(<8 x i32> %t
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test11a:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2
@@ -315,7 +315,7 @@ define <8 x float> @test11a(<8 x i32> %t
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test11a:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
@@ -327,7 +327,7 @@ define <8 x float> @test11a(<8 x i32> %t
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test11a:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vblendmps (%rdi), %ymm1, %ymm0 {%k1}
@@ -339,7 +339,7 @@ define <8 x float> @test11a(<8 x i32> %t
 
 define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
 ; AVX1-LABEL: test11b:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vpslld $31, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
@@ -352,7 +352,7 @@ define <8 x i32> @test11b(<8 x i1> %mask
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test11b:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
@@ -361,7 +361,7 @@ define <8 x i32> @test11b(<8 x i1> %mask
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test11b:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512F-NEXT:    vpsllq $63, %zmm0, %zmm0
@@ -371,7 +371,7 @@ define <8 x i32> @test11b(<8 x i1> %mask
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test11b:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1
 ; SKX-NEXT:    vpblendmd (%rdi), %ymm1, %ymm0 {%k1}
@@ -382,7 +382,7 @@ define <8 x i32> @test11b(<8 x i1> %mask
 
 define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
 ; AVX1-LABEL: test11c:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
@@ -394,7 +394,7 @@ define <8 x float> @test11c(<8 x i1> %ma
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test11c:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
@@ -402,7 +402,7 @@ define <8 x float> @test11c(<8 x i1> %ma
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test11c:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512F-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmq %zmm0, %zmm0, %k1
@@ -411,7 +411,7 @@ define <8 x float> @test11c(<8 x i1> %ma
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test11c:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1
 ; SKX-NEXT:    vmovaps (%rdi), %ymm0 {%k1} {z}
@@ -422,7 +422,7 @@ define <8 x float> @test11c(<8 x i1> %ma
 
 define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
 ; AVX1-LABEL: test11d:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
@@ -434,7 +434,7 @@ define <8 x i32> @test11d(<8 x i1> %mask
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test11d:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
@@ -442,7 +442,7 @@ define <8 x i32> @test11d(<8 x i1> %mask
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test11d:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; AVX512F-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmq %zmm0, %zmm0, %k1
@@ -451,7 +451,7 @@ define <8 x i32> @test11d(<8 x i1> %mask
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test11d:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; SKX-NEXT:    vpmovw2m %xmm0, %k1
 ; SKX-NEXT:    vmovdqu32 (%rdi), %ymm0 {%k1} {z}
@@ -462,7 +462,7 @@ define <8 x i32> @test11d(<8 x i1> %mask
 
 define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
 ; AVX1-LABEL: test12:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
@@ -473,7 +473,7 @@ define void @test12(<8 x i32> %trigger,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test12:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmaskmovd %ymm1, %ymm0, (%rdi)
@@ -481,7 +481,7 @@ define void @test12(<8 x i32> %trigger,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test12:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
 ; AVX512F-NEXT:    ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
@@ -493,7 +493,7 @@ define void @test12(<8 x i32> %trigger,
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test12:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpcmpeqd %ymm2, %ymm0, %k1
 ; SKX-NEXT:    vmovdqu32 %ymm1, (%rdi) {%k1}
@@ -506,7 +506,7 @@ define void @test12(<8 x i32> %trigger,
 
 define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
 ; AVX1-LABEL: test14:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -515,7 +515,7 @@ define void @test14(<2 x i32> %trigger,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test14:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX2-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -524,7 +524,7 @@ define void @test14(<2 x i32> %trigger,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test14:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX512F-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -533,7 +533,7 @@ define void @test14(<2 x i32> %trigger,
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test14:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
@@ -546,7 +546,7 @@ define void @test14(<2 x i32> %trigger,
 
 define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
 ; AVX1-LABEL: test15:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -556,7 +556,7 @@ define void @test15(<2 x i32> %trigger,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test15:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX2-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -566,7 +566,7 @@ define void @test15(<2 x i32> %trigger,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test15:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX512F-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -576,7 +576,7 @@ define void @test15(<2 x i32> %trigger,
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test15:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
@@ -589,7 +589,7 @@ define void @test15(<2 x i32> %trigger,
 
 define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %dst) {
 ; AVX1-LABEL: test16:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -599,7 +599,7 @@ define <2 x float> @test16(<2 x i32> %tr
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test16:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX2-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -609,7 +609,7 @@ define <2 x float> @test16(<2 x i32> %tr
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test16:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX512F-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -619,7 +619,7 @@ define <2 x float> @test16(<2 x i32> %tr
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test16:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
@@ -632,7 +632,7 @@ define <2 x float> @test16(<2 x i32> %tr
 
 define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
 ; AVX1-LABEL: test17:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -644,7 +644,7 @@ define <2 x i32> @test17(<2 x i32> %trig
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test17:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX2-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -656,7 +656,7 @@ define <2 x i32> @test17(<2 x i32> %trig
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test17:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX512F-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
@@ -668,7 +668,7 @@ define <2 x i32> @test17(<2 x i32> %trig
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test17:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; SKX-NEXT:    vpcmpeqq %xmm2, %xmm0, %k1
@@ -683,7 +683,7 @@ define <2 x i32> @test17(<2 x i32> %trig
 
 define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
 ; AVX1-LABEL: test18:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
@@ -692,7 +692,7 @@ define <2 x float> @test18(<2 x i32> %tr
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test18:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX2-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
@@ -701,7 +701,7 @@ define <2 x float> @test18(<2 x i32> %tr
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test18:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX512F-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
@@ -710,7 +710,7 @@ define <2 x float> @test18(<2 x i32> %tr
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test18:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; SKX-NEXT:    vpcmpeqq %xmm1, %xmm0, %k1
@@ -723,18 +723,18 @@ define <2 x float> @test18(<2 x i32> %tr
 
 define <4 x float> @load_all(<4 x i32> %trigger, <4 x float>* %addr) {
 ; AVX-LABEL: load_all:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovups (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: load_all:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: load_all:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    vmovups (%rdi), %xmm0 {%k1} {z}
 ; SKX-NEXT:    retq
@@ -749,19 +749,19 @@ define <4 x float> @load_all(<4 x i32> %
 
 define <4 x float> @mload_constmask_v4f32(<4 x float>* %addr, <4 x float> %dst) {
 ; AVX-LABEL: mload_constmask_v4f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = mem[0],xmm0[1],mem[2,3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4f32:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmovaps {{.*#+}} xmm1 = [4294967295,0,4294967295,4294967295]
 ; AVX512F-NEXT:    vmaskmovps (%rdi), %xmm1, %xmm2
 ; AVX512F-NEXT:    vblendvps %xmm1, %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v4f32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $13, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovups (%rdi), %xmm0 {%k1}
@@ -774,28 +774,28 @@ define <4 x float> @mload_constmask_v4f3
 
 define <4 x i32> @mload_constmask_v4i32(<4 x i32>* %addr, <4 x i32> %dst) {
 ; AVX1-LABEL: mload_constmask_v4i32:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vmovaps {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295]
 ; AVX1-NEXT:    vmaskmovps (%rdi), %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: mload_constmask_v4i32:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295]
 ; AVX2-NEXT:    vpmaskmovd (%rdi), %xmm1, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4i32:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,4294967295]
 ; AVX512F-NEXT:    vpmaskmovd (%rdi), %xmm1, %xmm2
 ; AVX512F-NEXT:    vblendvps %xmm1, %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v4i32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $14, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovdqu32 (%rdi), %xmm0 {%k1}
@@ -808,14 +808,14 @@ define <4 x i32> @mload_constmask_v4i32(
 
 define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst) {
 ; AVX-LABEL: mload_constmask_v8f32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,0,0,0,0,0]
 ; AVX-NEXT:    vmaskmovps (%rdi), %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8f32:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    movw $7, %ax
 ; AVX512F-NEXT:    kmovw %eax, %k1
@@ -824,7 +824,7 @@ define <8 x float> @mload_constmask_v8f3
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v8f32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $7, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovups (%rdi), %ymm0 {%k1}
@@ -835,21 +835,21 @@ define <8 x float> @mload_constmask_v8f3
 
 define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %dst) {
 ; AVX-LABEL: mload_constmask_v4f64:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovapd {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
 ; AVX-NEXT:    vmaskmovpd (%rdi), %ymm1, %ymm1
 ; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4f64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmovapd {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
 ; AVX512F-NEXT:    vmaskmovpd (%rdi), %ymm1, %ymm2
 ; AVX512F-NEXT:    vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v4f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $7, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %ymm0 {%k1}
@@ -862,12 +862,12 @@ define <4 x double> @mload_constmask_v4f
 
 define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
 ; AVX-LABEL: mload_constmask_v8i32:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2],ymm0[3,4,5,6],mem[7]
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8i32:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
 ; AVX512F-NEXT:    movw $135, %ax
 ; AVX512F-NEXT:    kmovw %eax, %k1
@@ -876,7 +876,7 @@ define <8 x i32> @mload_constmask_v8i32(
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v8i32:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $-121, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovdqu32 (%rdi), %ymm0 {%k1}
@@ -887,24 +887,24 @@ define <8 x i32> @mload_constmask_v8i32(
 
 define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
 ; AVX1-LABEL: mload_constmask_v4i64:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = mem[0],ymm0[1,2],mem[3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: mload_constmask_v4i64:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1],ymm0[2,3,4,5],mem[6,7]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4i64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [18446744073709551615,0,0,18446744073709551615]
 ; AVX512F-NEXT:    vpmaskmovq (%rdi), %ymm1, %ymm2
 ; AVX512F-NEXT:    vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v4i64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $9, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovdqu64 (%rdi), %ymm0 {%k1}
@@ -917,20 +917,20 @@ define <4 x i64> @mload_constmask_v4i64(
 
 define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %dst) {
 ; AVX-LABEL: mload_constmask_v8f64:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],mem[3]
 ; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = mem[0,1,2],ymm0[3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8f64:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    movb $-121, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
 ; AVX512F-NEXT:    vmovupd (%rdi), %zmm0 {%k1}
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v8f64:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $-121, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %zmm0 {%k1}
@@ -943,19 +943,19 @@ define <8 x double> @mload_constmask_v8f
 
 define <4 x double> @mload_constmask_v4f64_undef_passthrough(<4 x double>* %addr) {
 ; AVX-LABEL: mload_constmask_v4f64_undef_passthrough:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
 ; AVX-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4f64_undef_passthrough:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,18446744073709551615,0]
 ; AVX512F-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v4f64_undef_passthrough:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $7, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovupd (%rdi), %ymm0 {%k1} {z}
@@ -966,25 +966,25 @@ define <4 x double> @mload_constmask_v4f
 
 define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
 ; AVX1-LABEL: mload_constmask_v4i64_undef_passthrough:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vmovapd {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
 ; AVX1-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: mload_constmask_v4i64_undef_passthrough:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
 ; AVX2-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: mload_constmask_v4i64_undef_passthrough:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,18446744073709551615,18446744073709551615,0]
 ; AVX512F-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: mload_constmask_v4i64_undef_passthrough:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    movb $6, %al
 ; SKX-NEXT:    kmovd %eax, %k1
 ; SKX-NEXT:    vmovdqu64 (%rdi), %ymm0 {%k1} {z}
@@ -995,25 +995,25 @@ define <4 x i64> @mload_constmask_v4i64_
 
 define void @test21(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
 ; AVX1-LABEL: test21:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test21:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmaskmovd %xmm1, %xmm0, (%rdi)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: test21:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpmaskmovd %xmm1, %xmm0, (%rdi)
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: test21:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX-NEXT:    vmovdqu32 %xmm1, (%rdi) {%k1}
 ; SKX-NEXT:    retq
@@ -1026,12 +1026,12 @@ define void @test21(<4 x i32> %trigger,
 
 define void @one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
 ; AVX-LABEL: one_mask_bit_set1:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmovss %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: one_mask_bit_set1:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vmovss %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 false, i1 false, i1 false>)
@@ -1042,12 +1042,12 @@ define void @one_mask_bit_set1(<4 x i32>
 
 define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
 ; AVX-LABEL: one_mask_bit_set2:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vextractps $2, %xmm0, 8(%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: one_mask_bit_set2:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractps $2, %xmm0, 8(%rdi)
 ; AVX512-NEXT:    retq
   call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
@@ -1058,14 +1058,14 @@ define void @one_mask_bit_set2(<4 x floa
 
 define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
 ; AVX-LABEL: one_mask_bit_set3:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vmovlps %xmm0, 16(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: one_mask_bit_set3:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX512-NEXT:    vmovlps %xmm0, 16(%rdi)
 ; AVX512-NEXT:    vzeroupper
@@ -1078,14 +1078,14 @@ define void @one_mask_bit_set3(<4 x i64>
 
 define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
 ; AVX-LABEL: one_mask_bit_set4:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vmovhpd %xmm0, 24(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: one_mask_bit_set4:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX512-NEXT:    vmovhpd %xmm0, 24(%rdi)
 ; AVX512-NEXT:    vzeroupper
@@ -1098,14 +1098,14 @@ define void @one_mask_bit_set4(<4 x doub
 
 define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
 ; AVX-LABEL: one_mask_bit_set5:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm0
 ; AVX-NEXT:    vmovlps %xmm0, 48(%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: one_mask_bit_set5:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
 ; AVX512-NEXT:    vmovlps %xmm0, 48(%rdi)
 ; AVX512-NEXT:    vzeroupper
@@ -1118,12 +1118,12 @@ define void @one_mask_bit_set5(<8 x doub
 
 define <4 x i32> @load_one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
 ; AVX-LABEL: load_one_mask_bit_set1:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vpinsrd $0, (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set1:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vpinsrd $0, (%rdi), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 false, i1 false, i1 false>, <4 x i32> %val)
@@ -1134,12 +1134,12 @@ define <4 x i32> @load_one_mask_bit_set1
 
 define <4 x float> @load_one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
 ; AVX-LABEL: load_one_mask_bit_set2:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set2:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX512-NEXT:    retq
   %res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>, <4 x float> %val)
@@ -1150,21 +1150,21 @@ define <4 x float> @load_one_mask_bit_se
 
 define <4 x i64> @load_one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
 ; AVX1-LABEL: load_one_mask_bit_set3:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpinsrq $0, 16(%rdi), %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: load_one_mask_bit_set3:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpinsrq $0, 16(%rdi), %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set3:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpinsrq $0, 16(%rdi), %xmm1, %xmm1
 ; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1177,14 +1177,14 @@ define <4 x i64> @load_one_mask_bit_set3
 
 define <4 x double> @load_one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
 ; AVX-LABEL: load_one_mask_bit_set4:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set4:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1197,7 +1197,7 @@ define <4 x double> @load_one_mask_bit_s
 
 define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
 ; AVX-LABEL: load_one_mask_bit_set5:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
@@ -1205,7 +1205,7 @@ define <8 x double> @load_one_mask_bit_s
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: load_one_mask_bit_set5:
-; AVX512:       ## BB#0:
+; AVX512:       ## %bb.0:
 ; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm1
 ; AVX512-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512-NEXT:    vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
@@ -1219,17 +1219,17 @@ define <8 x double> @load_one_mask_bit_s
 
 define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x i32> %mask) {
 ; AVX-LABEL: trunc_mask:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    vmaskmovps %xmm0, %xmm2, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_mask:
-; AVX512F:       ## BB#0:
+; AVX512F:       ## %bb.0:
 ; AVX512F-NEXT:    vmaskmovps %xmm0, %xmm2, (%rdi)
 ; AVX512F-NEXT:    retq
 ;
 ; SKX-LABEL: trunc_mask:
-; SKX:       ## BB#0:
+; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; SKX-NEXT:    vpcmpgtd %xmm2, %xmm1, %k1
 ; SKX-NEXT:    vmovups %xmm0, (%rdi) {%k1}

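The hunks above apply a single mechanical substitution: every autogenerated CHECK line that matched the old basic-block comment "BB#N" now expects "%bb.N". The target's assembly comment string itself is unchanged ("#" on ELF targets, "##" in the Darwin-targeted tests above). A minimal sketch of a hand-written test under the new convention, using a purely illustrative function that is not part of this commit:

  define i32 @example(i32 %x) nounwind {
  ; CHECK-LABEL: example:
  ; CHECK:       # %bb.0: # %entry
  ; CHECK-NEXT:    movl %edi, %eax
  ; CHECK-NEXT:    retq
  entry:
    ret i32 %x
  }

Named blocks keep their IR label as a trailing comment ("# %entry" here, "# %loadbb" in the memcmp tests below); only the "BB#" spelling of the block number changes.
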
Modified: llvm/trunk/test/CodeGen/X86/memcmp-minsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp-minsize.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp-minsize.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcmp-minsize.ll Mon Dec  4 09:18:51 2017
@@ -13,7 +13,7 @@ declare i32 @memcmp(i8*, i8*, i64)
 
 define i32 @length2(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length2:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $2
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -23,7 +23,7 @@ define i32 @length2(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $2
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -33,7 +33,7 @@ define i32 @length2(i8* %X, i8* %Y) noun
 
 define i1 @length2_eq(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length2_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %ecx
@@ -42,7 +42,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpw (%rsi), %ax
 ; X64-NEXT:    sete %al
@@ -54,14 +54,14 @@ define i1 @length2_eq(i8* %X, i8* %Y) no
 
 define i1 @length2_eq_const(i8* %X) nounwind minsize {
 ; X86-LABEL: length2_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    cmpw $12849, (%eax) # imm = 0x3231
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpw $12849, (%rdi) # imm = 0x3231
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
@@ -72,7 +72,7 @@ define i1 @length2_eq_const(i8* %X) noun
 
 define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length2_eq_nobuiltin_attr:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $2
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -84,7 +84,7 @@ define i1 @length2_eq_nobuiltin_attr(i8*
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq_nobuiltin_attr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $2
 ; X64-NEXT:    popq %rdx
@@ -100,7 +100,7 @@ define i1 @length2_eq_nobuiltin_attr(i8*
 
 define i32 @length3(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length3:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $3
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -110,7 +110,7 @@ define i32 @length3(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $3
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -120,7 +120,7 @@ define i32 @length3(i8* %X, i8* %Y) noun
 
 define i1 @length3_eq(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length3_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $3
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -132,7 +132,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $3
 ; X64-NEXT:    popq %rdx
@@ -148,7 +148,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) no
 
 define i32 @length4(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length4:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $4
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -158,7 +158,7 @@ define i32 @length4(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $4
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -168,7 +168,7 @@ define i32 @length4(i8* %X, i8* %Y) noun
 
 define i1 @length4_eq(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length4_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %ecx
@@ -177,7 +177,7 @@ define i1 @length4_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    cmpl (%rsi), %eax
 ; X64-NEXT:    setne %al
@@ -189,14 +189,14 @@ define i1 @length4_eq(i8* %X, i8* %Y) no
 
 define i1 @length4_eq_const(i8* %X) nounwind minsize {
 ; X86-LABEL: length4_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    cmpl $875770417, (%eax) # imm = 0x34333231
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $875770417, (%rdi) # imm = 0x34333231
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
@@ -207,7 +207,7 @@ define i1 @length4_eq_const(i8* %X) noun
 
 define i32 @length5(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length5:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $5
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -217,7 +217,7 @@ define i32 @length5(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $5
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -227,7 +227,7 @@ define i32 @length5(i8* %X, i8* %Y) noun
 
 define i1 @length5_eq(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length5_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $5
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -239,7 +239,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $5
 ; X64-NEXT:    popq %rdx
@@ -255,7 +255,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) no
 
 define i32 @length8(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $8
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -265,7 +265,7 @@ define i32 @length8(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $8
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -275,7 +275,7 @@ define i32 @length8(i8* %X, i8* %Y) noun
 
 define i1 @length8_eq(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length8_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $8
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -287,7 +287,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    cmpq (%rsi), %rax
 ; X64-NEXT:    sete %al
@@ -299,7 +299,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) no
 
 define i1 @length8_eq_const(i8* %X) nounwind minsize {
 ; X86-LABEL: length8_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $8
 ; X86-NEXT:    pushl $.L.str
@@ -311,7 +311,7 @@ define i1 @length8_eq_const(i8* %X) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
 ; X64-NEXT:    cmpq %rax, (%rdi)
 ; X64-NEXT:    setne %al
@@ -323,7 +323,7 @@ define i1 @length8_eq_const(i8* %X) noun
 
 define i1 @length12_eq(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length12_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $12
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -335,7 +335,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $12
 ; X64-NEXT:    popq %rdx
@@ -351,7 +351,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) n
 
 define i32 @length12(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length12:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $12
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -361,7 +361,7 @@ define i32 @length12(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $12
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -373,7 +373,7 @@ define i32 @length12(i8* %X, i8* %Y) nou
 
 define i32 @length16(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $16
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -383,7 +383,7 @@ define i32 @length16(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $16
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -393,7 +393,7 @@ define i32 @length16(i8* %X, i8* %Y) nou
 
 define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
 ; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $16
 ; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -405,7 +405,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
@@ -417,7 +417,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -427,7 +427,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length16_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
@@ -441,7 +441,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 
 define i1 @length16_eq_const(i8* %X) nounwind minsize {
 ; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $16
 ; X86-NOSSE-NEXT:    pushl $.L.str
@@ -453,7 +453,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
@@ -463,7 +463,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
@@ -472,7 +472,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length16_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
@@ -488,7 +488,7 @@ define i1 @length16_eq_const(i8* %X) nou
 
 define i32 @length24(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length24:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -498,7 +498,7 @@ define i32 @length24(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length24:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $24
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -508,7 +508,7 @@ define i32 @length24(i8* %X, i8* %Y) nou
 
 define i1 @length24_eq(i8* %x, i8* %y) nounwind minsize {
 ; X86-LABEL: length24_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -520,7 +520,7 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length24_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $24
 ; X64-NEXT:    popq %rdx
@@ -536,7 +536,7 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 
 define i1 @length24_eq_const(i8* %X) nounwind minsize {
 ; X86-LABEL: length24_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl $.L.str
@@ -548,7 +548,7 @@ define i1 @length24_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length24_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $24
 ; X64-NEXT:    popq %rdx
@@ -565,7 +565,7 @@ define i1 @length24_eq_const(i8* %X) nou
 
 define i32 @length32(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $32
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -575,7 +575,7 @@ define i32 @length32(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $32
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -587,7 +587,7 @@ define i32 @length32(i8* %X, i8* %Y) nou
 
 define i1 @length32_eq(i8* %x, i8* %y) nounwind minsize {
 ; X86-LABEL: length32_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $32
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -599,7 +599,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    pushq %rax
 ; X64-SSE2-NEXT:    pushq $32
 ; X64-SSE2-NEXT:    popq %rdx
@@ -610,7 +610,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
@@ -625,7 +625,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 
 define i1 @length32_eq_const(i8* %X) nounwind minsize {
 ; X86-LABEL: length32_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $32
 ; X86-NEXT:    pushl $.L.str
@@ -637,7 +637,7 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    pushq %rax
 ; X64-SSE2-NEXT:    pushq $32
 ; X64-SSE2-NEXT:    popq %rdx
@@ -649,7 +649,7 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
@@ -664,7 +664,7 @@ define i1 @length32_eq_const(i8* %X) nou
 
 define i32 @length64(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length64:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -674,7 +674,7 @@ define i32 @length64(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq $64
 ; X64-NEXT:    popq %rdx
 ; X64-NEXT:    jmp memcmp # TAILCALL
@@ -684,7 +684,7 @@ define i32 @length64(i8* %X, i8* %Y) nou
 
 define i1 @length64_eq(i8* %x, i8* %y) nounwind minsize {
 ; X86-LABEL: length64_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -696,7 +696,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length64_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $64
 ; X64-NEXT:    popq %rdx
@@ -712,7 +712,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 
 define i1 @length64_eq_const(i8* %X) nounwind minsize {
 ; X86-LABEL: length64_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl $.L.str
@@ -724,7 +724,7 @@ define i1 @length64_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length64_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    pushq $64
 ; X64-NEXT:    popq %rdx

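The memcmp diffs below exercise the same rename across several llc configurations, with FileCheck prefixes such as X86-NOSSE, X86-SSE2, X64-SSE2, and X64-AVX2 selecting the checks for each one. A rough sketch of how such multi-prefix RUN lines are commonly written (the triples and -mattr flags here are illustrative; the real test's RUN lines are not shown in this diff):

  ; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86,X86-NOSSE
  ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64,X64-SSE2
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX2

Output shared by every 32-bit configuration is checked under the common X86 prefix, while lines that depend on the SSE level go under the more specific prefix, which is why a single function can carry X86, X86-SSE2, and X64-AVX2 blocks at once.
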
Modified: llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll Mon Dec  4 09:18:51 2017
@@ -13,7 +13,7 @@ declare i32 @memcmp(i8*, i8*, i64)
 
 define i32 @length2(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length2:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %ecx
@@ -26,7 +26,7 @@ define i32 @length2(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl (%rsi), %ecx
 ; X64-NEXT:    rolw $8, %ax
@@ -41,7 +41,7 @@ define i32 @length2(i8* %X, i8* %Y) noun
 
 define i1 @length2_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length2_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %ecx
@@ -50,7 +50,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpw (%rsi), %ax
 ; X64-NEXT:    sete %al
@@ -62,7 +62,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) no
 
 define i1 @length2_eq_const(i8* %X) nounwind optsize {
 ; X86-LABEL: length2_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movzwl (%eax), %eax
 ; X86-NEXT:    cmpl $12849, %eax # imm = 0x3231
@@ -70,7 +70,7 @@ define i1 @length2_eq_const(i8* %X) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpl $12849, %eax # imm = 0x3231
 ; X64-NEXT:    setne %al
@@ -82,7 +82,7 @@ define i1 @length2_eq_const(i8* %X) noun
 
 define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length2_eq_nobuiltin_attr:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $2
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -94,7 +94,7 @@ define i1 @length2_eq_nobuiltin_attr(i8*
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq_nobuiltin_attr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    movl $2, %edx
 ; X64-NEXT:    callq memcmp
@@ -109,7 +109,7 @@ define i1 @length2_eq_nobuiltin_attr(i8*
 
 define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length3:
-; X86:       # BB#0: # %loadbb
+; X86:       # %bb.0: # %loadbb
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -119,7 +119,7 @@ define i32 @length3(i8* %X, i8* %Y) noun
 ; X86-NEXT:    rolw $8, %si
 ; X86-NEXT:    cmpw %si, %dx
 ; X86-NEXT:    jne .LBB4_1
-; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:  # %bb.2: # %loadbb1
 ; X86-NEXT:    movzbl 2(%eax), %eax
 ; X86-NEXT:    movzbl 2(%ecx), %ecx
 ; X86-NEXT:    subl %ecx, %eax
@@ -133,14 +133,14 @@ define i32 @length3(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3:
-; X64:       # BB#0: # %loadbb
+; X64:       # %bb.0: # %loadbb
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl (%rsi), %ecx
 ; X64-NEXT:    rolw $8, %ax
 ; X64-NEXT:    rolw $8, %cx
 ; X64-NEXT:    cmpw %cx, %ax
 ; X64-NEXT:    jne .LBB4_1
-; X64-NEXT:  # BB#2: # %loadbb1
+; X64-NEXT:  # %bb.2: # %loadbb1
 ; X64-NEXT:    movzbl 2(%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rsi), %ecx
 ; X64-NEXT:    subl %ecx, %eax
@@ -156,13 +156,13 @@ define i32 @length3(i8* %X, i8* %Y) noun
 
 define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length3_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %edx
 ; X86-NEXT:    cmpw (%eax), %dx
 ; X86-NEXT:    jne .LBB5_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movb 2(%ecx), %dl
 ; X86-NEXT:    xorl %ecx, %ecx
 ; X86-NEXT:    cmpb 2(%eax), %dl
@@ -176,11 +176,11 @@ define i1 @length3_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpw (%rsi), %ax
 ; X64-NEXT:    jne .LBB5_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movb 2(%rdi), %cl
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpb 2(%rsi), %cl
@@ -198,7 +198,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) no
 
 define i32 @length4(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length4:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %ecx
@@ -212,7 +212,7 @@ define i32 @length4(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %ecx
 ; X64-NEXT:    movl (%rsi), %edx
 ; X64-NEXT:    bswapl %ecx
@@ -228,7 +228,7 @@ define i32 @length4(i8* %X, i8* %Y) noun
 
 define i1 @length4_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length4_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %ecx
@@ -237,7 +237,7 @@ define i1 @length4_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    cmpl (%rsi), %eax
 ; X64-NEXT:    setne %al
@@ -249,14 +249,14 @@ define i1 @length4_eq(i8* %X, i8* %Y) no
 
 define i1 @length4_eq_const(i8* %X) nounwind optsize {
 ; X86-LABEL: length4_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    cmpl $875770417, (%eax) # imm = 0x34333231
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $875770417, (%rdi) # imm = 0x34333231
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
@@ -267,7 +267,7 @@ define i1 @length4_eq_const(i8* %X) noun
 
 define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length5:
-; X86:       # BB#0: # %loadbb
+; X86:       # %bb.0: # %loadbb
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -277,7 +277,7 @@ define i32 @length5(i8* %X, i8* %Y) noun
 ; X86-NEXT:    bswapl %esi
 ; X86-NEXT:    cmpl %esi, %edx
 ; X86-NEXT:    jne .LBB9_1
-; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:  # %bb.2: # %loadbb1
 ; X86-NEXT:    movzbl 4(%eax), %eax
 ; X86-NEXT:    movzbl 4(%ecx), %ecx
 ; X86-NEXT:    subl %ecx, %eax
@@ -291,14 +291,14 @@ define i32 @length5(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5:
-; X64:       # BB#0: # %loadbb
+; X64:       # %bb.0: # %loadbb
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    movl (%rsi), %ecx
 ; X64-NEXT:    bswapl %eax
 ; X64-NEXT:    bswapl %ecx
 ; X64-NEXT:    cmpl %ecx, %eax
 ; X64-NEXT:    jne .LBB9_1
-; X64-NEXT:  # BB#2: # %loadbb1
+; X64-NEXT:  # %bb.2: # %loadbb1
 ; X64-NEXT:    movzbl 4(%rdi), %eax
 ; X64-NEXT:    movzbl 4(%rsi), %ecx
 ; X64-NEXT:    subl %ecx, %eax
@@ -314,13 +314,13 @@ define i32 @length5(i8* %X, i8* %Y) noun
 
 define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length5_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %edx
 ; X86-NEXT:    cmpl (%eax), %edx
 ; X86-NEXT:    jne .LBB10_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movb 4(%ecx), %dl
 ; X86-NEXT:    xorl %ecx, %ecx
 ; X86-NEXT:    cmpb 4(%eax), %dl
@@ -334,11 +334,11 @@ define i1 @length5_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    cmpl (%rsi), %eax
 ; X64-NEXT:    jne .LBB10_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movb 4(%rdi), %cl
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpb 4(%rsi), %cl
@@ -356,7 +356,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) no
 
 define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -366,7 +366,7 @@ define i32 @length8(i8* %X, i8* %Y) noun
 ; X86-NEXT:    bswapl %edx
 ; X86-NEXT:    cmpl %edx, %ecx
 ; X86-NEXT:    jne .LBB11_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movl 4(%esi), %ecx
 ; X86-NEXT:    movl 4(%eax), %edx
 ; X86-NEXT:    bswapl %ecx
@@ -384,7 +384,7 @@ define i32 @length8(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
@@ -400,13 +400,13 @@ define i32 @length8(i8* %X, i8* %Y) noun
 
 define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length8_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %edx
 ; X86-NEXT:    cmpl (%eax), %edx
 ; X86-NEXT:    jne .LBB12_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movl 4(%ecx), %edx
 ; X86-NEXT:    xorl %ecx, %ecx
 ; X86-NEXT:    cmpl 4(%eax), %edx
@@ -420,7 +420,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    cmpq (%rsi), %rax
 ; X64-NEXT:    sete %al
@@ -432,11 +432,11 @@ define i1 @length8_eq(i8* %X, i8* %Y) no
 
 define i1 @length8_eq_const(i8* %X) nounwind optsize {
 ; X86-LABEL: length8_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    cmpl $858927408, (%ecx) # imm = 0x33323130
 ; X86-NEXT:    jne .LBB13_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl $926299444, 4(%ecx) # imm = 0x37363534
 ; X86-NEXT:    je .LBB13_3
@@ -449,7 +449,7 @@ define i1 @length8_eq_const(i8* %X) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
 ; X64-NEXT:    cmpq %rax, (%rdi)
 ; X64-NEXT:    setne %al
@@ -461,7 +461,7 @@ define i1 @length8_eq_const(i8* %X) noun
 
 define i1 @length12_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length12_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $12
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -473,11 +473,11 @@ define i1 @length12_eq(i8* %X, i8* %Y) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    cmpq (%rsi), %rax
 ; X64-NEXT:    jne .LBB14_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movl 8(%rdi), %ecx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpl 8(%rsi), %ecx
@@ -495,7 +495,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) n
 
 define i32 @length12(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length12:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $12
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -505,14 +505,14 @@ define i32 @length12(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    cmpq %rdx, %rcx
 ; X64-NEXT:    jne .LBB15_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movl 8(%rdi), %ecx
 ; X64-NEXT:    movl 8(%rsi), %edx
 ; X64-NEXT:    bswapl %ecx
@@ -535,7 +535,7 @@ define i32 @length12(i8* %X, i8* %Y) nou
 
 define i32 @length16(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $16
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -545,14 +545,14 @@ define i32 @length16(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    cmpq %rdx, %rcx
 ; X64-NEXT:    jne .LBB16_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movq 8(%rdi), %rcx
 ; X64-NEXT:    movq 8(%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
@@ -573,7 +573,7 @@ define i32 @length16(i8* %X, i8* %Y) nou
 
 define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
 ; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $16
 ; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -585,7 +585,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
@@ -597,7 +597,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -607,7 +607,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length16_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
@@ -621,7 +621,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 
 define i1 @length16_eq_const(i8* %X) nounwind optsize {
 ; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $16
 ; X86-NOSSE-NEXT:    pushl $.L.str
@@ -633,7 +633,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
@@ -643,7 +643,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
@@ -652,7 +652,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length16_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
@@ -668,7 +668,7 @@ define i1 @length16_eq_const(i8* %X) nou
 
 define i32 @length24(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length24:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -678,7 +678,7 @@ define i32 @length24(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length24:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $24, %edx
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
@@ -687,7 +687,7 @@ define i32 @length24(i8* %X, i8* %Y) nou
 
 define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
 ; X86-LABEL: length24_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -699,14 +699,14 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB20_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    movq 16(%rdi), %rcx
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    cmpq 16(%rsi), %rcx
@@ -719,13 +719,13 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length24_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-AVX2-NEXT:    jne .LBB20_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX2-NEXT:    movq 16(%rdi), %rcx
 ; X64-AVX2-NEXT:    xorl %eax, %eax
 ; X64-AVX2-NEXT:    cmpq 16(%rsi), %rcx
@@ -743,7 +743,7 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 
 define i1 @length24_eq_const(i8* %X) nounwind optsize {
 ; X86-LABEL: length24_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl $.L.str
@@ -755,13 +755,13 @@ define i1 @length24_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB21_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
 ; X64-SSE2-NEXT:    cmpq %rcx, 16(%rdi)
@@ -774,13 +774,13 @@ define i1 @length24_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length24_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-AVX2-NEXT:    jne .LBB21_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX2-NEXT:    xorl %eax, %eax
 ; X64-AVX2-NEXT:    movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
 ; X64-AVX2-NEXT:    cmpq %rcx, 16(%rdi)
@@ -798,7 +798,7 @@ define i1 @length24_eq_const(i8* %X) nou
 
 define i32 @length32(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $32
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -808,7 +808,7 @@ define i32 @length32(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $32, %edx
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
@@ -819,7 +819,7 @@ define i32 @length32(i8* %X, i8* %Y) nou
 
 define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
 ; X86-NOSSE-LABEL: length32_eq:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $32
 ; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -831,7 +831,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length32_eq:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
@@ -840,7 +840,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    pmovmskb %xmm1, %edx
 ; X86-SSE2-NEXT:    cmpl $65535, %edx # imm = 0xFFFF
 ; X86-SSE2-NEXT:    jne .LBB23_2
-; X86-SSE2-NEXT:  # BB#1: # %loadbb1
+; X86-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X86-SSE2-NEXT:    movdqu 16(%ecx), %xmm0
 ; X86-SSE2-NEXT:    movdqu 16(%eax), %xmm1
 ; X86-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -857,14 +857,14 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB23_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    movdqu 16(%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu 16(%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -880,7 +880,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
@@ -895,7 +895,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 
 define i1 @length32_eq_const(i8* %X) nounwind optsize {
 ; X86-NOSSE-LABEL: length32_eq_const:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $32
 ; X86-NOSSE-NEXT:    pushl $.L.str
@@ -907,14 +907,14 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length32_eq_const:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
 ; X86-SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; X86-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
 ; X86-SSE2-NEXT:    jne .LBB24_2
-; X86-SSE2-NEXT:  # BB#1: # %loadbb1
+; X86-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X86-SSE2-NEXT:    movdqu 16(%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
 ; X86-SSE2-NEXT:    pmovmskb %xmm0, %ecx
@@ -930,13 +930,13 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB24_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    movdqu 16(%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %ecx
@@ -951,7 +951,7 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
@@ -966,7 +966,7 @@ define i1 @length32_eq_const(i8* %X) nou
 
 define i32 @length64(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length64:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -976,7 +976,7 @@ define i32 @length64(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $64, %edx
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
@@ -985,7 +985,7 @@ define i32 @length64(i8* %X, i8* %Y) nou
 
 define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
 ; X86-LABEL: length64_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -997,7 +997,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length64_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    pushq %rax
 ; X64-SSE2-NEXT:    movl $64, %edx
 ; X64-SSE2-NEXT:    callq memcmp
@@ -1007,13 +1007,13 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length64_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %eax
 ; X64-AVX2-NEXT:    jne .LBB26_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX2-NEXT:    vmovdqu 32(%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb 32(%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %ecx
@@ -1034,7 +1034,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 
 define i1 @length64_eq_const(i8* %X) nounwind optsize {
 ; X86-LABEL: length64_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl $.L.str
@@ -1046,7 +1046,7 @@ define i1 @length64_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length64_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    pushq %rax
 ; X64-SSE2-NEXT:    movl $.L.str, %esi
 ; X64-SSE2-NEXT:    movl $64, %edx
@@ -1057,13 +1057,13 @@ define i1 @length64_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length64_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %eax
 ; X64-AVX2-NEXT:    jne .LBB27_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX2-NEXT:    vmovdqu 32(%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %ecx

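Every hunk in this patch is the same one-line substitution: the AsmPrinter
now emits basic-block comments as '# %bb.N' (the MIR syntax for a block
reference) instead of '# BB#N', so each FileCheck line that matches a block
header changes while the generated machine code stays identical. As a
minimal sketch of a check written against the new format (the function name
and the RUN line are invented here for illustration, not taken from the
patch):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

define i32 @zero() nounwind {
; CHECK-LABEL: zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  ret i32 0
}
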
Modified: llvm/trunk/test/CodeGen/X86/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcmp.ll Mon Dec  4 09:18:51 2017
@@ -15,12 +15,12 @@ declare i32 @memcmp(i8*, i8*, i64)
 
 define i32 @length0(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length0:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length0:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    retq
    %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
@@ -29,12 +29,12 @@ define i32 @length0(i8* %X, i8* %Y) noun
 
 define i1 @length0_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length0_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movb $1, %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length0_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movb $1, %al
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
@@ -44,7 +44,7 @@ define i1 @length0_eq(i8* %X, i8* %Y) no
 
 define i32 @length2(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length2:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %ecx
@@ -57,7 +57,7 @@ define i32 @length2(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl (%rsi), %ecx
 ; X64-NEXT:    rolw $8, %ax
@@ -72,7 +72,7 @@ define i32 @length2(i8* %X, i8* %Y) noun
 
 define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length2_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %ecx
@@ -81,7 +81,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpw (%rsi), %ax
 ; X64-NEXT:    sete %al
@@ -93,7 +93,7 @@ define i1 @length2_eq(i8* %X, i8* %Y) no
 
 define i1 @length2_eq_const(i8* %X) nounwind {
 ; X86-LABEL: length2_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movzwl (%eax), %eax
 ; X86-NEXT:    cmpl $12849, %eax # imm = 0x3231
@@ -101,7 +101,7 @@ define i1 @length2_eq_const(i8* %X) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpl $12849, %eax # imm = 0x3231
 ; X64-NEXT:    setne %al
@@ -113,7 +113,7 @@ define i1 @length2_eq_const(i8* %X) noun
 
 define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length2_eq_nobuiltin_attr:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $2
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -125,7 +125,7 @@ define i1 @length2_eq_nobuiltin_attr(i8*
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2_eq_nobuiltin_attr:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    movl $2, %edx
 ; X64-NEXT:    callq memcmp
@@ -140,7 +140,7 @@ define i1 @length2_eq_nobuiltin_attr(i8*
 
 define i32 @length3(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length3:
-; X86:       # BB#0: # %loadbb
+; X86:       # %bb.0: # %loadbb
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -150,7 +150,7 @@ define i32 @length3(i8* %X, i8* %Y) noun
 ; X86-NEXT:    rolw $8, %si
 ; X86-NEXT:    cmpw %si, %dx
 ; X86-NEXT:    jne .LBB6_1
-; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:  # %bb.2: # %loadbb1
 ; X86-NEXT:    movzbl 2(%eax), %eax
 ; X86-NEXT:    movzbl 2(%ecx), %ecx
 ; X86-NEXT:    subl %ecx, %eax
@@ -164,14 +164,14 @@ define i32 @length3(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3:
-; X64:       # BB#0: # %loadbb
+; X64:       # %bb.0: # %loadbb
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl (%rsi), %ecx
 ; X64-NEXT:    rolw $8, %ax
 ; X64-NEXT:    rolw $8, %cx
 ; X64-NEXT:    cmpw %cx, %ax
 ; X64-NEXT:    jne .LBB6_1
-; X64-NEXT:  # BB#2: # %loadbb1
+; X64-NEXT:  # %bb.2: # %loadbb1
 ; X64-NEXT:    movzbl 2(%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rsi), %ecx
 ; X64-NEXT:    subl %ecx, %eax
@@ -187,13 +187,13 @@ define i32 @length3(i8* %X, i8* %Y) noun
 
 define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length3_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %edx
 ; X86-NEXT:    cmpw (%eax), %dx
 ; X86-NEXT:    jne .LBB7_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movb 2(%ecx), %dl
 ; X86-NEXT:    xorl %ecx, %ecx
 ; X86-NEXT:    cmpb 2(%eax), %dl
@@ -206,11 +206,11 @@ define i1 @length3_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpw (%rsi), %ax
 ; X64-NEXT:    jne .LBB7_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movb 2(%rdi), %cl
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpb 2(%rsi), %cl
@@ -228,7 +228,7 @@ define i1 @length3_eq(i8* %X, i8* %Y) no
 
 define i32 @length4(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length4:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %ecx
@@ -242,7 +242,7 @@ define i32 @length4(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %ecx
 ; X64-NEXT:    movl (%rsi), %edx
 ; X64-NEXT:    bswapl %ecx
@@ -258,7 +258,7 @@ define i32 @length4(i8* %X, i8* %Y) noun
 
 define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length4_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %ecx
@@ -267,7 +267,7 @@ define i1 @length4_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    cmpl (%rsi), %eax
 ; X64-NEXT:    setne %al
@@ -279,14 +279,14 @@ define i1 @length4_eq(i8* %X, i8* %Y) no
 
 define i1 @length4_eq_const(i8* %X) nounwind {
 ; X86-LABEL: length4_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    cmpl $875770417, (%eax) # imm = 0x34333231
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $875770417, (%rdi) # imm = 0x34333231
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
@@ -297,7 +297,7 @@ define i1 @length4_eq_const(i8* %X) noun
 
 define i32 @length5(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length5:
-; X86:       # BB#0: # %loadbb
+; X86:       # %bb.0: # %loadbb
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -307,7 +307,7 @@ define i32 @length5(i8* %X, i8* %Y) noun
 ; X86-NEXT:    bswapl %esi
 ; X86-NEXT:    cmpl %esi, %edx
 ; X86-NEXT:    jne .LBB11_1
-; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:  # %bb.2: # %loadbb1
 ; X86-NEXT:    movzbl 4(%eax), %eax
 ; X86-NEXT:    movzbl 4(%ecx), %ecx
 ; X86-NEXT:    subl %ecx, %eax
@@ -321,14 +321,14 @@ define i32 @length5(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5:
-; X64:       # BB#0: # %loadbb
+; X64:       # %bb.0: # %loadbb
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    movl (%rsi), %ecx
 ; X64-NEXT:    bswapl %eax
 ; X64-NEXT:    bswapl %ecx
 ; X64-NEXT:    cmpl %ecx, %eax
 ; X64-NEXT:    jne .LBB11_1
-; X64-NEXT:  # BB#2: # %loadbb1
+; X64-NEXT:  # %bb.2: # %loadbb1
 ; X64-NEXT:    movzbl 4(%rdi), %eax
 ; X64-NEXT:    movzbl 4(%rsi), %ecx
 ; X64-NEXT:    subl %ecx, %eax
@@ -344,13 +344,13 @@ define i32 @length5(i8* %X, i8* %Y) noun
 
 define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length5_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %edx
 ; X86-NEXT:    cmpl (%eax), %edx
 ; X86-NEXT:    jne .LBB12_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movb 4(%ecx), %dl
 ; X86-NEXT:    xorl %ecx, %ecx
 ; X86-NEXT:    cmpb 4(%eax), %dl
@@ -363,11 +363,11 @@ define i1 @length5_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    cmpl (%rsi), %eax
 ; X64-NEXT:    jne .LBB12_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movb 4(%rdi), %cl
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpb 4(%rsi), %cl
@@ -385,7 +385,7 @@ define i1 @length5_eq(i8* %X, i8* %Y) no
 
 define i32 @length8(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length8:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -395,7 +395,7 @@ define i32 @length8(i8* %X, i8* %Y) noun
 ; X86-NEXT:    bswapl %edx
 ; X86-NEXT:    cmpl %edx, %ecx
 ; X86-NEXT:    jne .LBB13_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movl 4(%esi), %ecx
 ; X86-NEXT:    movl 4(%eax), %edx
 ; X86-NEXT:    bswapl %ecx
@@ -413,7 +413,7 @@ define i32 @length8(i8* %X, i8* %Y) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
@@ -429,13 +429,13 @@ define i32 @length8(i8* %X, i8* %Y) noun
 
 define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length8_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %edx
 ; X86-NEXT:    cmpl (%eax), %edx
 ; X86-NEXT:    jne .LBB14_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    movl 4(%ecx), %edx
 ; X86-NEXT:    xorl %ecx, %ecx
 ; X86-NEXT:    cmpl 4(%eax), %edx
@@ -448,7 +448,7 @@ define i1 @length8_eq(i8* %X, i8* %Y) no
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    cmpq (%rsi), %rax
 ; X64-NEXT:    sete %al
@@ -460,11 +460,11 @@ define i1 @length8_eq(i8* %X, i8* %Y) no
 
 define i1 @length8_eq_const(i8* %X) nounwind {
 ; X86-LABEL: length8_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    cmpl $858927408, (%ecx) # imm = 0x33323130
 ; X86-NEXT:    jne .LBB15_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:  # %bb.1: # %loadbb1
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl $926299444, 4(%ecx) # imm = 0x37363534
 ; X86-NEXT:    je .LBB15_3
@@ -476,7 +476,7 @@ define i1 @length8_eq_const(i8* %X) noun
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length8_eq_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
 ; X64-NEXT:    cmpq %rax, (%rdi)
 ; X64-NEXT:    setne %al
@@ -488,7 +488,7 @@ define i1 @length8_eq_const(i8* %X) noun
 
 define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length12_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $12
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -500,11 +500,11 @@ define i1 @length12_eq(i8* %X, i8* %Y) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12_eq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    cmpq (%rsi), %rax
 ; X64-NEXT:    jne .LBB16_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movl 8(%rdi), %ecx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpl 8(%rsi), %ecx
@@ -522,7 +522,7 @@ define i1 @length12_eq(i8* %X, i8* %Y) n
 
 define i32 @length12(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length12:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $12
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -532,14 +532,14 @@ define i32 @length12(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    cmpq %rdx, %rcx
 ; X64-NEXT:    jne .LBB17_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movl 8(%rdi), %ecx
 ; X64-NEXT:    movl 8(%rsi), %edx
 ; X64-NEXT:    bswapl %ecx
@@ -562,7 +562,7 @@ define i32 @length12(i8* %X, i8* %Y) nou
 
 define i32 @length16(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length16:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $16
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -572,14 +572,14 @@ define i32 @length16(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    cmpq %rdx, %rcx
 ; X64-NEXT:    jne .LBB18_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:  # %bb.1: # %loadbb1
 ; X64-NEXT:    movq 8(%rdi), %rcx
 ; X64-NEXT:    movq 8(%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
@@ -600,7 +600,7 @@ define i32 @length16(i8* %X, i8* %Y) nou
 
 define i1 @length16_eq(i8* %x, i8* %y) nounwind {
 ; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $16
 ; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -612,7 +612,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE1-LABEL: length16_eq:
-; X86-SSE1:       # BB#0:
+; X86-SSE1:       # %bb.0:
 ; X86-SSE1-NEXT:    pushl $0
 ; X86-SSE1-NEXT:    pushl $16
 ; X86-SSE1-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -624,7 +624,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-SSE1-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
@@ -636,7 +636,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -646,7 +646,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: length16_eq:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpmovmskb %xmm0, %eax
@@ -660,7 +660,7 @@ define i1 @length16_eq(i8* %x, i8* %y) n
 
 define i1 @length16_eq_const(i8* %X) nounwind {
 ; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $16
 ; X86-NOSSE-NEXT:    pushl $.L.str
@@ -672,7 +672,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE1-LABEL: length16_eq_const:
-; X86-SSE1:       # BB#0:
+; X86-SSE1:       # %bb.0:
 ; X86-SSE1-NEXT:    pushl $0
 ; X86-SSE1-NEXT:    pushl $16
 ; X86-SSE1-NEXT:    pushl $.L.str
@@ -684,7 +684,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-SSE1-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
@@ -694,7 +694,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
@@ -703,7 +703,7 @@ define i1 @length16_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: length16_eq_const:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpmovmskb %xmm0, %eax
@@ -719,7 +719,7 @@ define i1 @length16_eq_const(i8* %X) nou
 
 define i32 @length24(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length24:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -729,7 +729,7 @@ define i32 @length24(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length24:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $24, %edx
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
@@ -738,7 +738,7 @@ define i32 @length24(i8* %X, i8* %Y) nou
 
 define i1 @length24_eq(i8* %x, i8* %y) nounwind {
 ; X86-LABEL: length24_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -750,14 +750,14 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB22_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    movq 16(%rdi), %rcx
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    cmpq 16(%rsi), %rcx
@@ -770,13 +770,13 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: length24_eq:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-AVX-NEXT:    jne .LBB22_2
-; X64-AVX-NEXT:  # BB#1: # %loadbb1
+; X64-AVX-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX-NEXT:    movq 16(%rdi), %rcx
 ; X64-AVX-NEXT:    xorl %eax, %eax
 ; X64-AVX-NEXT:    cmpq 16(%rsi), %rcx
@@ -794,7 +794,7 @@ define i1 @length24_eq(i8* %x, i8* %y) n
 
 define i1 @length24_eq_const(i8* %X) nounwind {
 ; X86-LABEL: length24_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $24
 ; X86-NEXT:    pushl $.L.str
@@ -806,13 +806,13 @@ define i1 @length24_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB23_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
 ; X64-SSE2-NEXT:    cmpq %rcx, 16(%rdi)
@@ -825,13 +825,13 @@ define i1 @length24_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: length24_eq_const:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-AVX-NEXT:    jne .LBB23_2
-; X64-AVX-NEXT:  # BB#1: # %loadbb1
+; X64-AVX-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX-NEXT:    xorl %eax, %eax
 ; X64-AVX-NEXT:    movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
 ; X64-AVX-NEXT:    cmpq %rcx, 16(%rdi)
@@ -849,7 +849,7 @@ define i1 @length24_eq_const(i8* %X) nou
 
 define i32 @length32(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $32
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -859,7 +859,7 @@ define i32 @length32(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $32, %edx
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
@@ -870,7 +870,7 @@ define i32 @length32(i8* %X, i8* %Y) nou
 
 define i1 @length32_eq(i8* %x, i8* %y) nounwind {
 ; X86-NOSSE-LABEL: length32_eq:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $32
 ; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -882,7 +882,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE1-LABEL: length32_eq:
-; X86-SSE1:       # BB#0:
+; X86-SSE1:       # %bb.0:
 ; X86-SSE1-NEXT:    pushl $0
 ; X86-SSE1-NEXT:    pushl $32
 ; X86-SSE1-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -894,7 +894,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-SSE1-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length32_eq:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
@@ -903,7 +903,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    pmovmskb %xmm1, %edx
 ; X86-SSE2-NEXT:    cmpl $65535, %edx # imm = 0xFFFF
 ; X86-SSE2-NEXT:    jne .LBB25_2
-; X86-SSE2-NEXT:  # BB#1: # %loadbb1
+; X86-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X86-SSE2-NEXT:    movdqu 16(%ecx), %xmm0
 ; X86-SSE2-NEXT:    movdqu 16(%eax), %xmm1
 ; X86-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -919,14 +919,14 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB25_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    movdqu 16(%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu 16(%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -942,13 +942,13 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: length32_eq:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX1-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-AVX1-NEXT:    jne .LBB25_2
-; X64-AVX1-NEXT:  # BB#1: # %loadbb1
+; X64-AVX1-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX1-NEXT:    vmovdqu 16(%rdi), %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqb 16(%rsi), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpmovmskb %xmm0, %ecx
@@ -963,7 +963,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
@@ -978,7 +978,7 @@ define i1 @length32_eq(i8* %x, i8* %y) n
 
 define i1 @length32_eq_const(i8* %X) nounwind {
 ; X86-NOSSE-LABEL: length32_eq_const:
-; X86-NOSSE:       # BB#0:
+; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl $0
 ; X86-NOSSE-NEXT:    pushl $32
 ; X86-NOSSE-NEXT:    pushl $.L.str
@@ -990,7 +990,7 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE1-LABEL: length32_eq_const:
-; X86-SSE1:       # BB#0:
+; X86-SSE1:       # %bb.0:
 ; X86-SSE1-NEXT:    pushl $0
 ; X86-SSE1-NEXT:    pushl $32
 ; X86-SSE1-NEXT:    pushl $.L.str
@@ -1002,14 +1002,14 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X86-SSE1-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length32_eq_const:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
 ; X86-SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; X86-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
 ; X86-SSE2-NEXT:    jne .LBB26_2
-; X86-SSE2-NEXT:  # BB#1: # %loadbb1
+; X86-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X86-SSE2-NEXT:    movdqu 16(%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
 ; X86-SSE2-NEXT:    pmovmskb %xmm0, %ecx
@@ -1024,13 +1024,13 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-SSE2-NEXT:    jne .LBB26_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:  # %bb.1: # %loadbb1
 ; X64-SSE2-NEXT:    movdqu 16(%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %ecx
@@ -1045,13 +1045,13 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: length32_eq_const:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX1-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; X64-AVX1-NEXT:    jne .LBB26_2
-; X64-AVX1-NEXT:  # BB#1: # %loadbb1
+; X64-AVX1-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX1-NEXT:    vmovdqu 16(%rdi), %xmm0
 ; X64-AVX1-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpmovmskb %xmm0, %ecx
@@ -1066,7 +1066,7 @@ define i1 @length32_eq_const(i8* %X) nou
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
@@ -1081,7 +1081,7 @@ define i1 @length32_eq_const(i8* %X) nou
 
 define i32 @length64(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length64:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -1091,7 +1091,7 @@ define i32 @length64(i8* %X, i8* %Y) nou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $64, %edx
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
@@ -1100,7 +1100,7 @@ define i32 @length64(i8* %X, i8* %Y) nou
 
 define i1 @length64_eq(i8* %x, i8* %y) nounwind {
 ; X86-LABEL: length64_eq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -1112,7 +1112,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length64_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    pushq %rax
 ; X64-SSE2-NEXT:    movl $64, %edx
 ; X64-SSE2-NEXT:    callq memcmp
@@ -1122,7 +1122,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: length64_eq:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    pushq %rax
 ; X64-AVX1-NEXT:    movl $64, %edx
 ; X64-AVX1-NEXT:    callq memcmp
@@ -1132,13 +1132,13 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length64_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %eax
 ; X64-AVX2-NEXT:    jne .LBB28_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX2-NEXT:    vmovdqu 32(%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb 32(%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %ecx
@@ -1159,7 +1159,7 @@ define i1 @length64_eq(i8* %x, i8* %y) n
 
 define i1 @length64_eq_const(i8* %X) nounwind {
 ; X86-LABEL: length64_eq_const:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $64
 ; X86-NEXT:    pushl $.L.str
@@ -1171,7 +1171,7 @@ define i1 @length64_eq_const(i8* %X) nou
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length64_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    pushq %rax
 ; X64-SSE2-NEXT:    movl $.L.str, %esi
 ; X64-SSE2-NEXT:    movl $64, %edx
@@ -1182,7 +1182,7 @@ define i1 @length64_eq_const(i8* %X) nou
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: length64_eq_const:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    pushq %rax
 ; X64-AVX1-NEXT:    movl $.L.str, %esi
 ; X64-AVX1-NEXT:    movl $64, %edx
@@ -1193,13 +1193,13 @@ define i1 @length64_eq_const(i8* %X) nou
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length64_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %eax
 ; X64-AVX2-NEXT:    jne .LBB29_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:  # %bb.1: # %loadbb1
 ; X64-AVX2-NEXT:    vmovdqu 32(%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %ecx
@@ -1221,7 +1221,7 @@ define i1 @length64_eq_const(i8* %X) nou
 ; This checks that we do not do stupid things with huge sizes.
 define i32 @huge_length(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: huge_length:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl $2147483647 # imm = 0x7FFFFFFF
 ; X86-NEXT:    pushl $-1
 ; X86-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -1231,7 +1231,7 @@ define i32 @huge_length(i8* %X, i8* %Y)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: huge_length:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
 ; X64-NEXT:    jmp memcmp # TAILCALL
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind

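Most of the lines touched in memcmp.ll are in tests for the inline memcmp
expansion: small fixed sizes are lowered into chains of loads, bswaps, and
compares split across blocks named loadbb, loadbb1, and so on, which is why
so many of the rewritten comments read '# %bb.1: # %loadbb1'. When the
caller only tests the result for equality against zero, the bswaps drop out
and a single wide compare (or pcmpeqb/pmovmskb at vector widths) suffices,
as in length8_eq above. A reduced sketch of that pattern, with a
hypothetical function name and an assumed x86-64 triple:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

declare i32 @memcmp(i8*, i8*, i64)

define i1 @eq8(i8* %x, i8* %y) nounwind {
; CHECK-LABEL: eq8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq (%rdi), %rax
; CHECK-NEXT:    cmpq (%rsi), %rax
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %m = tail call i32 @memcmp(i8* %x, i8* %y, i64 8) nounwind
  %c = icmp eq i32 %m, 0
  ret i1 %c
}
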
Modified: llvm/trunk/test/CodeGen/X86/memset-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset-2.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define fastcc void @t1() nounwind {
 ; CHECK-LABEL: t1:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    subl $16, %esp
 ; CHECK-NEXT:    pushl $188
 ; CHECK-NEXT:    pushl $0
@@ -17,7 +17,7 @@ entry:
 
 define fastcc void @t2(i8 signext %c) nounwind {
 ; CHECK-LABEL: t2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    subl $12, %esp
 ; CHECK-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    movl $76, {{[0-9]+}}(%esp)
@@ -31,7 +31,7 @@ declare void @llvm.memset.p0i8.i32(i8* n
 
 define void @t3(i8* nocapture %s, i8 %a) nounwind {
 ; CHECK-LABEL: t3:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    imull $16843009, %ecx, %ecx ## imm = 0x1010101
@@ -45,7 +45,7 @@ entry:
 
 define void @t4(i8* nocapture %s, i8 %a) nounwind {
 ; CHECK-LABEL: t4:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    imull $16843009, %ecx, %ecx ## imm = 0x1010101

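One detail worth noting in t3/t4 above: 16843009 is 0x01010101, and
multiplying a zero-extended byte by it replicates that byte into all four
bytes of the result (0x2A * 0x01010101 = 0x2A2A2A2A, with no carries since
the byte is less than 0x100). That is how the fill value is splatted
without vector instructions. A standalone sketch of the same computation:

define i32 @splat_byte(i8 %c) {
  %z = zext i8 %c to i32
  %s = mul i32 %z, 16843009   ; 0x01010101: copies %c into every byte
  ret i32 %s
}
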
Modified: llvm/trunk/test/CodeGen/X86/memset-nonzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset-nonzero.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset-nonzero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset-nonzero.ll Mon Dec  4 09:18:51 2017
@@ -9,20 +9,20 @@
 
 define void @memset_16_nonzero_bytes(i8* %x) {
 ; SSE-LABEL: memset_16_nonzero_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
 ; SSE-NEXT:    movq %rax, 8(%rdi)
 ; SSE-NEXT:    movq %rax, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_16_nonzero_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX-LABEL: memset_16_nonzero_bytes:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %xmm0, (%rdi)
 ; AVX-NEXT:    retq
@@ -32,7 +32,7 @@ define void @memset_16_nonzero_bytes(i8*
 
 define void @memset_32_nonzero_bytes(i8* %x) {
 ; SSE-LABEL: memset_32_nonzero_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
 ; SSE-NEXT:    movq %rax, 24(%rdi)
 ; SSE-NEXT:    movq %rax, 16(%rdi)
@@ -41,14 +41,14 @@ define void @memset_32_nonzero_bytes(i8*
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_32_nonzero_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
 ; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX-LABEL: memset_32_nonzero_bytes:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, (%rdi)
 ; AVX-NEXT:    vzeroupper
@@ -59,7 +59,7 @@ define void @memset_32_nonzero_bytes(i8*
 
 define void @memset_64_nonzero_bytes(i8* %x) {
 ; SSE-LABEL: memset_64_nonzero_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
 ; SSE-NEXT:    movq %rax, 56(%rdi)
 ; SSE-NEXT:    movq %rax, 48(%rdi)
@@ -72,7 +72,7 @@ define void @memset_64_nonzero_bytes(i8*
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_64_nonzero_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
 ; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
@@ -81,7 +81,7 @@ define void @memset_64_nonzero_bytes(i8*
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX-LABEL: memset_64_nonzero_bytes:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
 ; AVX-NEXT:    vmovups %ymm0, (%rdi)
@@ -93,7 +93,7 @@ define void @memset_64_nonzero_bytes(i8*
 
 define void @memset_128_nonzero_bytes(i8* %x) {
 ; SSE-LABEL: memset_128_nonzero_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
 ; SSE-NEXT:    movq %rax, 120(%rdi)
 ; SSE-NEXT:    movq %rax, 112(%rdi)
@@ -114,7 +114,7 @@ define void @memset_128_nonzero_bytes(i8
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_128_nonzero_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; SSE2FAST-NEXT:    movups %xmm0, 112(%rdi)
 ; SSE2FAST-NEXT:    movups %xmm0, 96(%rdi)
@@ -127,7 +127,7 @@ define void @memset_128_nonzero_bytes(i8
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX-LABEL: memset_128_nonzero_bytes:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, 96(%rdi)
 ; AVX-NEXT:    vmovups %ymm0, 64(%rdi)
@@ -141,7 +141,7 @@ define void @memset_128_nonzero_bytes(i8
 
 define void @memset_256_nonzero_bytes(i8* %x) {
 ; SSE-LABEL: memset_256_nonzero_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    pushq %rax
 ; SSE-NEXT:    .cfi_def_cfa_offset 16
 ; SSE-NEXT:    movl $42, %esi
@@ -151,7 +151,7 @@ define void @memset_256_nonzero_bytes(i8
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_256_nonzero_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; SSE2FAST-NEXT:    movups %xmm0, 240(%rdi)
 ; SSE2FAST-NEXT:    movups %xmm0, 224(%rdi)
@@ -172,7 +172,7 @@ define void @memset_256_nonzero_bytes(i8
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX-LABEL: memset_256_nonzero_bytes:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, 224(%rdi)
 ; AVX-NEXT:    vmovups %ymm0, 192(%rdi)
@@ -194,7 +194,7 @@ declare i8* @__memset_chk(i8*, i32, i64,
 
 define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
 ; SSE-LABEL: memset_16_nonconst_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movzbl %sil, %eax
 ; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
 ; SSE-NEXT:    imulq %rax, %rcx
@@ -203,7 +203,7 @@ define void @memset_16_nonconst_bytes(i8
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_16_nonconst_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movd %esi, %xmm0
 ; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -212,7 +212,7 @@ define void @memset_16_nonconst_bytes(i8
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX1-LABEL: memset_16_nonconst_bytes:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
@@ -220,7 +220,7 @@ define void @memset_16_nonconst_bytes(i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: memset_16_nonconst_bytes:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %esi, %xmm0
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
 ; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
@@ -231,7 +231,7 @@ define void @memset_16_nonconst_bytes(i8
 
 define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
 ; SSE-LABEL: memset_32_nonconst_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movzbl %sil, %eax
 ; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
 ; SSE-NEXT:    imulq %rax, %rcx
@@ -242,7 +242,7 @@ define void @memset_32_nonconst_bytes(i8
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_32_nonconst_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movd %esi, %xmm0
 ; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -252,7 +252,7 @@ define void @memset_32_nonconst_bytes(i8
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX1-LABEL: memset_32_nonconst_bytes:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
@@ -262,7 +262,7 @@ define void @memset_32_nonconst_bytes(i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: memset_32_nonconst_bytes:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %esi, %xmm0
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
@@ -274,7 +274,7 @@ define void @memset_32_nonconst_bytes(i8
 
 define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
 ; SSE-LABEL: memset_64_nonconst_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movzbl %sil, %eax
 ; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
 ; SSE-NEXT:    imulq %rax, %rcx
@@ -289,7 +289,7 @@ define void @memset_64_nonconst_bytes(i8
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_64_nonconst_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movd %esi, %xmm0
 ; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -301,7 +301,7 @@ define void @memset_64_nonconst_bytes(i8
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX1-LABEL: memset_64_nonconst_bytes:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
@@ -312,7 +312,7 @@ define void @memset_64_nonconst_bytes(i8
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: memset_64_nonconst_bytes:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %esi, %xmm0
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
@@ -325,7 +325,7 @@ define void @memset_64_nonconst_bytes(i8
 
 define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
 ; SSE-LABEL: memset_128_nonconst_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movzbl %sil, %eax
 ; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
 ; SSE-NEXT:    imulq %rax, %rcx
@@ -348,7 +348,7 @@ define void @memset_128_nonconst_bytes(i
 ; SSE-NEXT:    retq
 ;
 ; SSE2FAST-LABEL: memset_128_nonconst_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movd %esi, %xmm0
 ; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -364,7 +364,7 @@ define void @memset_128_nonconst_bytes(i
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX1-LABEL: memset_128_nonconst_bytes:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
@@ -377,7 +377,7 @@ define void @memset_128_nonconst_bytes(i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: memset_128_nonconst_bytes:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %esi, %xmm0
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, 96(%rdi)
@@ -392,12 +392,12 @@ define void @memset_128_nonconst_bytes(i
 
 define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
 ; SSE-LABEL: memset_256_nonconst_bytes:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $256, %edx # imm = 0x100
 ; SSE-NEXT:    jmp memset # TAILCALL
 ;
 ; SSE2FAST-LABEL: memset_256_nonconst_bytes:
-; SSE2FAST:       # BB#0:
+; SSE2FAST:       # %bb.0:
 ; SSE2FAST-NEXT:    movd %esi, %xmm0
 ; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -421,7 +421,7 @@ define void @memset_256_nonconst_bytes(i
 ; SSE2FAST-NEXT:    retq
 ;
 ; AVX1-LABEL: memset_256_nonconst_bytes:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
@@ -438,7 +438,7 @@ define void @memset_256_nonconst_bytes(i
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: memset_256_nonconst_bytes:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %esi, %xmm0
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, 224(%rdi)

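The tests above also show how the lowering scales with size and subtarget:
the plain-SSE path splats the byte into a 64-bit immediate (movabsq of
0x2A2A2A2A2A2A2A2A for a constant fill, imulq by 0x0101010101010101 for a
variable one) and issues movq stores, the fast-unaligned SSE and AVX paths
use 16- and 32-byte vector stores, and at 256 constant bytes the scalar
path falls back to calling memset. The variable-fill case is the same
multiply trick from memset-2.ll widened to 64 bits; as a standalone sketch:

define i64 @splat_byte64(i8 %c) {
  %z = zext i8 %c to i64
  %s = mul i64 %z, 72340172838076673   ; 0x0101010101010101
  ret i64 %s
}
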
Modified: llvm/trunk/test/CodeGen/X86/memset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @t() nounwind  {
 ; X86-LABEL: t:
-; X86:       ## BB#0: ## %entry
+; X86:       ## %bb.0: ## %entry
 ; X86-NEXT:    subl $44, %esp
 ; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
@@ -25,7 +25,7 @@ define void @t() nounwind  {
 ; X86-NEXT:    ## -- End function
 ;
 ; XMM-LABEL: t:
-; XMM:       ## BB#0: ## %entry
+; XMM:       ## %bb.0: ## %entry
 ; XMM-NEXT:    subl $60, %esp
 ; XMM-NEXT:    xorps %xmm0, %xmm0
 ; XMM-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
@@ -38,7 +38,7 @@ define void @t() nounwind  {
 ; XMM-NEXT:    ## -- End function
 ;
 ; YMM-LABEL: t:
-; YMM:       ## BB#0: ## %entry
+; YMM:       ## %bb.0: ## %entry
 ; YMM-NEXT:    pushl %ebp
 ; YMM-NEXT:    movl %esp, %ebp
 ; YMM-NEXT:    andl $-32, %esp
@@ -71,7 +71,7 @@ declare void @llvm.memset.p0i8.i64(i8* n
 ; unaligned loads and stores.
 define void @PR15348(i8* %a) {
 ; X86-LABEL: PR15348:
-; X86:       ## BB#0:
+; X86:       ## %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movb $0, 16(%eax)
 ; X86-NEXT:    movl $0, 12(%eax)
@@ -81,7 +81,7 @@ define void @PR15348(i8* %a) {
 ; X86-NEXT:    retl
 ;
 ; XMM-LABEL: PR15348:
-; XMM:       ## BB#0:
+; XMM:       ## %bb.0:
 ; XMM-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; XMM-NEXT:    movb $0, 16(%eax)
 ; XMM-NEXT:    movl $0, 12(%eax)
@@ -91,7 +91,7 @@ define void @PR15348(i8* %a) {
 ; XMM-NEXT:    retl
 ;
 ; YMM-LABEL: PR15348:
-; YMM:       ## BB#0:
+; YMM:       ## %bb.0:
 ; YMM-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; YMM-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; YMM-NEXT:    vmovups %xmm0, (%eax)

Modified: llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @bork() nounwind {
 ; FAST-LABEL: bork:
-; FAST:       # BB#0:
+; FAST:       # %bb.0:
 ; FAST-NEXT:    xorps %xmm0, %xmm0
 ; FAST-NEXT:    movups %xmm0, 64
 ; FAST-NEXT:    movups %xmm0, 48
@@ -15,7 +15,7 @@ define void @bork() nounwind {
 ; FAST-NEXT:    retl
 ;
 ; SLOW_32-LABEL: bork:
-; SLOW_32:       # BB#0:
+; SLOW_32:       # %bb.0:
 ; SLOW_32-NEXT:    movl $0, 4
 ; SLOW_32-NEXT:    movl $0, 0
 ; SLOW_32-NEXT:    movl $0, 12
@@ -39,7 +39,7 @@ define void @bork() nounwind {
 ; SLOW_32-NEXT:    retl
 ;
 ; SLOW_64-LABEL: bork:
-; SLOW_64:       # BB#0:
+; SLOW_64:       # %bb.0:
 ; SLOW_64-NEXT:    movq $0, 72
 ; SLOW_64-NEXT:    movq $0, 64
 ; SLOW_64-NEXT:    movq $0, 56

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll Mon Dec  4 09:18:51 2017
@@ -11,17 +11,17 @@
 
 define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_2f64_f64_23:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 16(%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_2f64_f64_23:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 16(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_2f64_f64_23:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    fldl 16(%eax)
 ; X32-SSE1-NEXT:    fldl 24(%eax)
@@ -29,7 +29,7 @@ define <2 x double> @merge_2f64_f64_23(d
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_2f64_f64_23:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 16(%eax), %xmm0
 ; X32-SSE41-NEXT:    retl
@@ -44,17 +44,17 @@ define <2 x double> @merge_2f64_f64_23(d
 
 define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_2i64_i64_12:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 8(%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_2i64_i64_12:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_2i64_i64_12:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -76,7 +76,7 @@ define <2 x i64> @merge_2i64_i64_12(i64*
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_2i64_i64_12:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
 ; X32-SSE41-NEXT:    retl
@@ -91,17 +91,17 @@ define <2 x i64> @merge_2i64_i64_12(i64*
 
 define <4 x float> @merge_4f32_f32_2345(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4f32_f32_2345:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 8(%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_2345:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE-LABEL: merge_4f32_f32_2345:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    movups 8(%eax), %xmm0
 ; X32-SSE-NEXT:    retl
@@ -122,17 +122,17 @@ define <4 x float> @merge_4f32_f32_2345(
 
 define <4 x float> @merge_4f32_f32_3zuu(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4f32_f32_3zuu:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_3zuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE-LABEL: merge_4f32_f32_3zuu:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE-NEXT:    retl
@@ -145,17 +145,17 @@ define <4 x float> @merge_4f32_f32_3zuu(
 
 define <4 x float> @merge_4f32_f32_34uu(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4f32_f32_34uu:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_34uu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4f32_f32_34uu:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -163,7 +163,7 @@ define <4 x float> @merge_4f32_f32_34uu(
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4f32_f32_34uu:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    retl
@@ -178,7 +178,7 @@ define <4 x float> @merge_4f32_f32_34uu(
 
 define <4 x float> @merge_4f32_f32_34z6(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE2-LABEL: merge_4f32_f32_34z6:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movups 12(%rdi), %xmm0
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
@@ -186,20 +186,20 @@ define <4 x float> @merge_4f32_f32_34z6(
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: merge_4f32_f32_34z6:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movups 12(%rdi), %xmm1
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_34z6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = mem[0,1],xmm0[2],mem[3]
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4f32_f32_34z6:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movups 12(%eax), %xmm0
 ; X32-SSE1-NEXT:    xorps %xmm1, %xmm1
@@ -208,7 +208,7 @@ define <4 x float> @merge_4f32_f32_34z6(
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4f32_f32_34z6:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 12(%eax), %xmm1
 ; X32-SSE41-NEXT:    xorps %xmm0, %xmm0
@@ -228,17 +228,17 @@ define <4 x float> @merge_4f32_f32_34z6(
 
 define <4 x float> @merge_4f32_f32_45zz(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4f32_f32_45zz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_45zz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4f32_f32_45zz:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -248,7 +248,7 @@ define <4 x float> @merge_4f32_f32_45zz(
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4f32_f32_45zz:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    retl
@@ -263,26 +263,26 @@ define <4 x float> @merge_4f32_f32_45zz(
 
 define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE2-LABEL: merge_4f32_f32_012u:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: merge_4f32_f32_012u:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_012u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4f32_f32_012u:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -292,7 +292,7 @@ define <4 x float> @merge_4f32_f32_012u(
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4f32_f32_012u:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -312,26 +312,26 @@ define <4 x float> @merge_4f32_f32_012u(
 
 define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE2-LABEL: merge_4f32_f32_019u:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: merge_4f32_f32_019u:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_019u:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4f32_f32_019u:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -341,7 +341,7 @@ define <4 x float> @merge_4f32_f32_019u(
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4f32_f32_019u:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -361,17 +361,17 @@ define <4 x float> @merge_4f32_f32_019u(
 
 define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_23u5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 8(%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_23u5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_23u5:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %esi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    .cfi_offset %esi, -8
@@ -387,7 +387,7 @@ define <4 x i32> @merge_4i32_i32_23u5(i3
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_23u5:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
 ; X32-SSE41-NEXT:    retl
@@ -405,19 +405,19 @@ define <4 x i32> @merge_4i32_i32_23u5(i3
 
 define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_23u5_inc2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 8(%rdi), %xmm0
 ; SSE-NEXT:    incl 8(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_23u5_inc2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    incl 8(%rdi)
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_23u5_inc2:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -439,7 +439,7 @@ define <4 x i32> @merge_4i32_i32_23u5_in
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc2:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
 ; X32-SSE41-NEXT:    incl 8(%eax)
@@ -460,19 +460,19 @@ define <4 x i32> @merge_4i32_i32_23u5_in
 
 define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_23u5_inc3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 8(%rdi), %xmm0
 ; SSE-NEXT:    incl 12(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_23u5_inc3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    incl 12(%rdi)
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_23u5_inc3:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -494,7 +494,7 @@ define <4 x i32> @merge_4i32_i32_23u5_in
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc3:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
 ; X32-SSE41-NEXT:    incl 12(%eax)
@@ -515,17 +515,17 @@ define <4 x i32> @merge_4i32_i32_23u5_in
 
 define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_3zuu:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_3zuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_3zuu:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl 12(%ecx), %ecx
@@ -534,7 +534,7 @@ define <4 x i32> @merge_4i32_i32_3zuu(i3
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_3zuu:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT:    retl
@@ -547,17 +547,17 @@ define <4 x i32> @merge_4i32_i32_3zuu(i3
 
 define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_34uu:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_34uu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_34uu:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl 12(%ecx), %edx
@@ -567,7 +567,7 @@ define <4 x i32> @merge_4i32_i32_34uu(i3
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_34uu:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    retl
@@ -582,17 +582,17 @@ define <4 x i32> @merge_4i32_i32_34uu(i3
 
 define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_45zz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_45zz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_45zz:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl 16(%ecx), %edx
@@ -604,7 +604,7 @@ define <4 x i32> @merge_4i32_i32_45zz(i3
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_45zz:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    retl
@@ -619,19 +619,19 @@ define <4 x i32> @merge_4i32_i32_45zz(i3
 
 define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_45zz_inc4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    incl 16(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_45zz_inc4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    incl 16(%rdi)
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_45zz_inc4:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -653,7 +653,7 @@ define <4 x i32> @merge_4i32_i32_45zz_in
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc4:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    incl 16(%eax)
@@ -671,19 +671,19 @@ define <4 x i32> @merge_4i32_i32_45zz_in
 
 define <4 x i32> @merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4i32_i32_45zz_inc5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    incl 20(%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_45zz_inc5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    incl 20(%rdi)
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_45zz_inc5:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -705,7 +705,7 @@ define <4 x i32> @merge_4i32_i32_45zz_in
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc5:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    incl 20(%eax)
@@ -723,17 +723,17 @@ define <4 x i32> @merge_4i32_i32_45zz_in
 
 define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_8i16_i16_23u567u9:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups 4(%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_8i16_i16_23u567u9:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 4(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_8i16_i16_23u567u9:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -755,7 +755,7 @@ define <8 x i16> @merge_8i16_i16_23u567u
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_8i16_i16_23u567u9:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups 4(%eax), %xmm0
 ; X32-SSE41-NEXT:    retl
@@ -782,17 +782,17 @@ define <8 x i16> @merge_8i16_i16_23u567u
 
 define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_8i16_i16_34uuuuuu:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_8i16_i16_34uuuuuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_8i16_i16_34uuuuuu:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl 6(%ecx), %ecx
@@ -800,7 +800,7 @@ define <8 x i16> @merge_8i16_i16_34uuuuu
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_8i16_i16_34uuuuuu:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT:    retl
@@ -815,17 +815,17 @@ define <8 x i16> @merge_8i16_i16_34uuuuu
 
 define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_8i16_i16_45u7zzzz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_8i16_i16_45u7zzzz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl 8(%ecx), %edx
@@ -837,7 +837,7 @@ define <8 x i16> @merge_8i16_i16_45u7zzz
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    retl
@@ -859,17 +859,17 @@ define <8 x i16> @merge_8i16_i16_45u7zzz
 
 define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movups (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %ebp
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %ebx
@@ -903,7 +903,7 @@ define <16 x i8> @merge_16i8_i8_01u34567
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movups (%eax), %xmm0
 ; X32-SSE41-NEXT:    retl
@@ -954,17 +954,17 @@ define <16 x i8> @merge_16i8_i8_01u34567
 
 define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movzwl (%ecx), %edx
@@ -977,7 +977,7 @@ define <16 x i8> @merge_16i8_i8_01u3uuzz
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT:    retl
@@ -1000,17 +1000,17 @@ define <16 x i8> @merge_16i8_i8_01u3uuzz
 
 define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl (%ecx), %edx
@@ -1022,7 +1022,7 @@ define <16 x i8> @merge_16i8_i8_0123uu67
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-SSE41-NEXT:    retl
@@ -1052,19 +1052,19 @@ define <16 x i8> @merge_16i8_i8_0123uu67
 
 define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
 ; SSE-LABEL: merge_4i32_i32_combine:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    movaps %xmm0, (%rdi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4i32_i32_combine:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vmovaps %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4i32_i32_combine:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1074,7 +1074,7 @@ define void @merge_4i32_i32_combine(<4 x
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4i32_i32_combine:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1096,21 +1096,21 @@ define void @merge_4i32_i32_combine(<4 x
 
 define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_2i64_i64_12_volatile:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_2i64_i64_12_volatile:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_2i64_i64_12_volatile:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    pushl %edi
 ; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X32-SSE1-NEXT:    pushl %esi
@@ -1132,7 +1132,7 @@ define <2 x i64> @merge_2i64_i64_12_vola
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: merge_2i64_i64_12_volatile:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT:    pinsrd $1, 12(%eax), %xmm0
@@ -1150,7 +1150,7 @@ define <2 x i64> @merge_2i64_i64_12_vola
 
 define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable noinline ssp {
 ; SSE2-LABEL: merge_4f32_f32_2345_volatile:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -1159,7 +1159,7 @@ define <4 x float> @merge_4f32_f32_2345_
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: merge_4f32_f32_2345_volatile:
-; SSE41:       # BB#0:
+; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -1167,7 +1167,7 @@ define <4 x float> @merge_4f32_f32_2345_
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_2345_volatile:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -1175,7 +1175,7 @@ define <4 x float> @merge_4f32_f32_2345_
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: merge_4f32_f32_2345_volatile:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -1187,7 +1187,7 @@ define <4 x float> @merge_4f32_f32_2345_
 ; X32-SSE1-NEXT:    retl
 ;
 ; X32-SSE41-LABEL: merge_4f32_f32_2345_volatile:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
@@ -1215,21 +1215,21 @@ define <4 x float> @merge_4f32_f32_2345_
 
 define <4 x float> @merge_4f32_f32_X0YY(float* %ptr0, float* %ptr1) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_4f32_f32_X0YY:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: merge_4f32_f32_X0YY:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0,0]
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE-LABEL: merge_4f32_f32_X0YY:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -1252,17 +1252,17 @@ define <4 x float> @merge_4f32_f32_X0YY(
 ; PR31309
 define <4 x i32> @load_i32_zext_i128_v4i32(i32* %ptr) {
 ; SSE-LABEL: load_i32_zext_i128_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: load_i32_zext_i128_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-SSE1-LABEL: load_i32_zext_i128_v4i32:
-; X32-SSE1:       # BB#0:
+; X32-SSE1:       # %bb.0:
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE1-NEXT:    movl (%ecx), %ecx
@@ -1273,7 +1273,7 @@ define <4 x i32> @load_i32_zext_i128_v4i
 ; X32-SSE1-NEXT:    retl $4
 ;
 ; X32-SSE41-LABEL: load_i32_zext_i128_v4i32:
-; X32-SSE41:       # BB#0:
+; X32-SSE41:       # %bb.0:
 ; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-SSE41-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll Mon Dec  4 09:18:51 2017
@@ -8,12 +8,12 @@
 
 define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_2f64_23:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 32(%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_2f64_23:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovups 32(%eax), %ymm0
 ; X32-AVX-NEXT:    retl
@@ -27,12 +27,12 @@ define <4 x double> @merge_4f64_2f64_23(
 
 define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_2f64_2z:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_2f64_2z:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovaps 32(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
@@ -44,12 +44,12 @@ define <4 x double> @merge_4f64_2f64_2z(
 
 define <4 x double> @merge_4f64_f64_2345(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_2345:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 16(%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_2345:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovups 16(%eax), %ymm0
 ; X32-AVX-NEXT:    retl
@@ -70,12 +70,12 @@ define <4 x double> @merge_4f64_f64_2345
 
 define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_3zuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_3zuu:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    retl
@@ -88,12 +88,12 @@ define <4 x double> @merge_4f64_f64_3zuu
 
 define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_34uu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 24(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_34uu:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovups 24(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
@@ -108,12 +108,12 @@ define <4 x double> @merge_4f64_f64_34uu
 
 define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_45zz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_45zz:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovaps 32(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
@@ -128,13 +128,13 @@ define <4 x double> @merge_4f64_f64_45zz
 
 define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_34z6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_34z6:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
@@ -154,12 +154,12 @@ define <4 x double> @merge_4f64_f64_34z6
 
 define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_2i64_3z:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps 48(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_2i64_3z:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovaps 48(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
@@ -171,12 +171,12 @@ define <4 x i64> @merge_4i64_2i64_3z(<2
 
 define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_i64_1234:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups 8(%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_i64_1234:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovups 8(%eax), %ymm0
 ; X32-AVX-NEXT:    retl
@@ -197,12 +197,12 @@ define <4 x i64> @merge_4i64_i64_1234(i6
 
 define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_i64_1zzu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_i64_1zzu:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    retl
@@ -216,12 +216,12 @@ define <4 x i64> @merge_4i64_i64_1zzu(i6
 
 define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_i64_23zz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps 16(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_i64_23zz:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovaps 16(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
@@ -236,7 +236,7 @@ define <4 x i64> @merge_4i64_i64_23zz(i6
 
 define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noinline ssp {
 ; AVX1-LABEL: merge_8f32_2f32_23z5:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX1-NEXT:    vmovups 16(%rdi), %xmm1
 ; AVX1-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -244,7 +244,7 @@ define <8 x float> @merge_8f32_2f32_23z5
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: merge_8f32_2f32_23z5:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX2-NEXT:    vmovdqu 16(%rdi), %xmm1
 ; AVX2-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -252,7 +252,7 @@ define <8 x float> @merge_8f32_2f32_23z5
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: merge_8f32_2f32_23z5:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vmovdqu 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -260,7 +260,7 @@ define <8 x float> @merge_8f32_2f32_23z5
 ; AVX512F-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8f32_2f32_23z5:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
@@ -279,13 +279,13 @@ define <8 x float> @merge_8f32_2f32_23z5
 
 define <8 x float> @merge_8f32_4f32_z2(<4 x float>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_8f32_4f32_z2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8f32_4f32_z2:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vinsertf128 $1, 32(%eax), %ymm0, %ymm0
@@ -298,12 +298,12 @@ define <8 x float> @merge_8f32_4f32_z2(<
 
 define <8 x float> @merge_8f32_f32_12zzuuzz(float* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_8f32_f32_12zzuuzz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8f32_f32_12zzuuzz:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    retl
@@ -322,13 +322,13 @@ define <8 x float> @merge_8f32_f32_12zzu
 
 define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_8f32_f32_1u3u5zu8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
@@ -351,13 +351,13 @@ define <8 x float> @merge_8f32_f32_1u3u5
 
 define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_8i32_4i32_z3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8i32_4i32_z3:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vinsertf128 $1, 48(%eax), %ymm0, %ymm0
@@ -370,14 +370,14 @@ define <8 x i32> @merge_8i32_4i32_z3(<4
 
 define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_8i32_i32_56zz9uzz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8i32_i32_56zz9uzz:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -401,13 +401,13 @@ define <8 x i32> @merge_8i32_i32_56zz9uz
 
 define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_8i32_i32_1u3u5zu8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
@@ -430,12 +430,12 @@ define <8 x i32> @merge_8i32_i32_1u3u5zu
 
 define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT:    retl
@@ -454,12 +454,12 @@ define <16 x i16> @merge_16i16_i16_89zzz
 
 define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    retl
@@ -477,12 +477,12 @@ define <16 x i16> @merge_16i16_i16_45u7u
 
 define <16 x i16> @merge_16i16_i16_0uu3uuuuuuuuCuEF(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups (%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovups (%eax), %ymm0
 ; X32-AVX-NEXT:    retl
@@ -506,13 +506,13 @@ define <16 x i16> @merge_16i16_i16_0uu3u
 
 define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovups (%rdi), %ymm0
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovups (%eax), %ymm0
 ; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
@@ -540,12 +540,12 @@ define <16 x i16> @merge_16i16_i16_0uu3z
 
 define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT:    retl
@@ -563,12 +563,12 @@ define <32 x i8> @merge_32i8_i8_45u7uuuu
 
 define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX-NEXT:    retl
@@ -594,14 +594,14 @@ define <32 x i8> @merge_32i8_i8_23u5uuuu
 
 define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_34uz_volatile:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX-NEXT:    vmovapd %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_34uz_volatile:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
@@ -619,7 +619,7 @@ define <4 x double> @merge_4f64_f64_34uz
 
 define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX1-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm1
 ; AVX1-NEXT:    vpinsrw $4, 24(%rdi), %xmm0, %xmm0
@@ -630,7 +630,7 @@ define <16 x i16> @merge_16i16_i16_0uu3z
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm1
 ; AVX2-NEXT:    vpinsrw $4, 24(%rdi), %xmm0, %xmm0
@@ -641,7 +641,7 @@ define <16 x i16> @merge_16i16_i16_0uu3z
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm1
 ; AVX512F-NEXT:    vpinsrw $4, 24(%rdi), %xmm0, %xmm0
@@ -652,7 +652,7 @@ define <16 x i16> @merge_16i16_i16_0uu3z
 ; AVX512F-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    vpinsrw $0, (%eax), %xmm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll Mon Dec  4 09:18:51 2017
@@ -7,14 +7,14 @@
 
 define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_2f64_12u4:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups 16(%rdi), %ymm0
 ; ALL-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_2f64_12u4:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups 16(%eax), %ymm0
 ; X32-AVX512F-NEXT:    vinsertf128 $1, 64(%eax), %ymm0, %ymm1
@@ -34,7 +34,7 @@ define <8 x double> @merge_8f64_2f64_12u
 
 define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_2f64_23z5:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups 32(%rdi), %ymm0
 ; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vinsertf128 $1, 80(%rdi), %ymm1, %ymm1
@@ -42,7 +42,7 @@ define <8 x double> @merge_8f64_2f64_23z
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_2f64_23z5:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups 32(%eax), %ymm0
 ; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
@@ -63,13 +63,13 @@ define <8 x double> @merge_8f64_2f64_23z
 
 define <8 x double> @merge_8f64_4f64_z2(<4 x double>* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_4f64_z2:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; ALL-NEXT:    vinsertf64x4 $1, 64(%rdi), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_4f64_z2:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX512F-NEXT:    vinsertf64x4 $1, 64(%eax), %zmm0, %zmm0
@@ -82,12 +82,12 @@ define <8 x double> @merge_8f64_4f64_z2(
 
 define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_f64_23uuuuu9:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups 16(%rdi), %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups 16(%eax), %zmm0
 ; X32-AVX512F-NEXT:    retl
@@ -105,12 +105,12 @@ define <8 x double> @merge_8f64_f64_23uu
 
 define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_f64_12zzuuzz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovaps 8(%rdi), %xmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovaps 8(%eax), %xmm0
 ; X32-AVX512F-NEXT:    retl
@@ -129,7 +129,7 @@ define <8 x double> @merge_8f64_f64_12zz
 
 define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movb $32, %al
 ; AVX512F-NEXT:    kmovw %eax, %k0
 ; AVX512F-NEXT:    knotw %k0, %k1
@@ -137,7 +137,7 @@ define <8 x double> @merge_8f64_f64_1u3u
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_8f64_f64_1u3u5zu8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movb $32, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k1
@@ -145,7 +145,7 @@ define <8 x double> @merge_8f64_f64_1u3u
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    movb $32, %cl
 ; X32-AVX512F-NEXT:    kmovw %ecx, %k0
@@ -170,13 +170,13 @@ define <8 x double> @merge_8f64_f64_1u3u
 
 define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8i64_4i64_z3:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; ALL-NEXT:    vinsertf64x4 $1, 96(%rdi), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8i64_4i64_z3:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX512F-NEXT:    vinsertf64x4 $1, 96(%eax), %zmm0, %zmm0
@@ -189,14 +189,14 @@ define <8 x i64> @merge_8i64_4i64_z3(<4
 
 define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8i64_i64_56zz9uzz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovaps 40(%rdi), %xmm0
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovaps 40(%eax), %xmm0
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
@@ -220,7 +220,7 @@ define <8 x i64> @merge_8i64_i64_56zz9uz
 
 define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movb $32, %al
 ; AVX512F-NEXT:    kmovw %eax, %k0
 ; AVX512F-NEXT:    knotw %k0, %k1
@@ -228,7 +228,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_8i64_i64_1u3u5zu8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movb $32, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k1
@@ -236,7 +236,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    movb $32, %cl
 ; X32-AVX512F-NEXT:    kmovw %ecx, %k0
@@ -261,12 +261,12 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu
 
 define <16 x float> @merge_16f32_f32_89zzzuuuuuuuuuuuz(float* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT:    retl
@@ -285,12 +285,12 @@ define <16 x float> @merge_16f32_f32_89z
 
 define <16 x float> @merge_16f32_f32_45u7uuuuuuuuuuuu(float* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups 16(%rdi), %xmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups 16(%eax), %xmm0
 ; X32-AVX512F-NEXT:    retl
@@ -308,12 +308,12 @@ define <16 x float> @merge_16f32_f32_45u
 
 define <16 x float> @merge_16f32_f32_0uu3uuuuuuuuCuEF(float* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups (%rdi), %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups (%eax), %zmm0
 ; X32-AVX512F-NEXT:    retl
@@ -337,7 +337,7 @@ define <16 x float> @merge_16f32_f32_0uu
 
 define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups (%rdi), %zmm1
 ; ALL-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; ALL-NEXT:    vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
@@ -345,7 +345,7 @@ define <16 x float> @merge_16f32_f32_0uu
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups (%eax), %zmm1
 ; X32-AVX512F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
@@ -375,12 +375,12 @@ define <16 x float> @merge_16f32_f32_0uu
 
 define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT:    retl
@@ -399,12 +399,12 @@ define <16 x i32> @merge_16i32_i32_12zzz
 
 define <16 x i32> @merge_16i32_i32_23u5uuuuuuuuuuuu(i32* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups 8(%rdi), %xmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups 8(%eax), %xmm0
 ; X32-AVX512F-NEXT:    retl
@@ -422,12 +422,12 @@ define <16 x i32> @merge_16i32_i32_23u5u
 
 define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovups (%rdi), %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovups (%eax), %zmm0
 ; X32-AVX512F-NEXT:    retl
@@ -451,7 +451,7 @@ define <16 x i32> @merge_16i32_i32_0uu3u
 
 define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movw $8240, %ax # imm = 0x2030
 ; AVX512F-NEXT:    kmovw %eax, %k0
 ; AVX512F-NEXT:    knotw %k0, %k1
@@ -459,7 +459,7 @@ define <16 x i32> @merge_16i32_i32_0uu3z
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movw $8240, %ax # imm = 0x2030
 ; AVX512BW-NEXT:    kmovd %eax, %k0
 ; AVX512BW-NEXT:    knotw %k0, %k1
@@ -467,7 +467,7 @@ define <16 x i32> @merge_16i32_i32_0uu3z
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    movw $8240, %cx # imm = 0x2030
 ; X32-AVX512F-NEXT:    kmovw %ecx, %k0
@@ -497,18 +497,18 @@ define <16 x i32> @merge_16i32_i32_0uu3z
 
 define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
@@ -529,12 +529,12 @@ define <32 x i16> @merge_32i16_i16_12u4u
 
 define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT:    retl
@@ -552,18 +552,18 @@ define <32 x i16> @merge_32i16_i16_45u7u
 
 define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
@@ -584,18 +584,18 @@ define <32 x i16> @merge_32i16_i16_23uzu
 
 define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
@@ -622,18 +622,18 @@ define <64 x i8> @merge_64i8_i8_12u4uuu8
 
 define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
 ; AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX512BW-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
@@ -661,7 +661,7 @@ define <64 x i8> @merge_64i8_i8_12u4uuuu
 
 define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_f64_23uuuuu9_volatile:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; ALL-NEXT:    vbroadcastsd 72(%rdi), %ymm1
@@ -669,7 +669,7 @@ define <8 x double> @merge_8f64_f64_23uu
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX512F-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
@@ -690,7 +690,7 @@ define <8 x double> @merge_8f64_f64_23uu
 
 define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile(i32* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT:    vpinsrd $3, 12(%rdi), %xmm0, %xmm0
 ; ALL-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -701,7 +701,7 @@ define <16 x i32> @merge_16i32_i32_0uu3u
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
-; X32-AVX512F:       # BB#0:
+; X32-AVX512F:       # %bb.0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX512F-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-AVX512F-NEXT:    vpinsrd $3, 12(%eax), %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-stores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-stores.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-stores.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-stores.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @foo (i64* %so) nounwind uwtable ssp {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl $0, 28(%eax)
 ; CHECK-NEXT:    movl $0, 24(%eax)

Modified: llvm/trunk/test/CodeGen/X86/merge-store-constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-store-constants.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-store-constants.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-store-constants.ll Mon Dec  4 09:18:51 2017
@@ -4,14 +4,14 @@
 
 define void @big_nonzero_16_bytes(i32* nocapture %a) {
 ; X32-LABEL: big_nonzero_16_bytes:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [1,2,3,4]
 ; X32-NEXT:    vmovups %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: big_nonzero_16_bytes:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [1,2,3,4]
 ; X64-NEXT:    vmovups %xmm0, (%rdi)
 ; X64-NEXT:    retq
@@ -32,14 +32,14 @@ define void @big_nonzero_16_bytes(i32* n
 
 define void @big_nonzero_16_bytes_big64bit_constants(i64* nocapture %a) {
 ; X32-LABEL: big_nonzero_16_bytes_big64bit_constants:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [1,1,1,3]
 ; X32-NEXT:    vmovups %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: big_nonzero_16_bytes_big64bit_constants:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $4294967297, %rax # imm = 0x100000001
 ; X64-NEXT:    movq %rax, (%rdi)
 ; X64-NEXT:    movabsq $12884901889, %rax # imm = 0x300000001
@@ -56,7 +56,7 @@ define void @big_nonzero_16_bytes_big64b
 
 define void @big_nonzero_32_bytes_splat(i32* nocapture %a) {
 ; X32-LABEL: big_nonzero_32_bytes_splat:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42]
 ; X32-NEXT:    vmovups %ymm0, (%eax)
@@ -64,7 +64,7 @@ define void @big_nonzero_32_bytes_splat(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: big_nonzero_32_bytes_splat:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42]
 ; X64-NEXT:    vmovups %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
@@ -92,7 +92,7 @@ define void @big_nonzero_32_bytes_splat(
 
 define void @big_nonzero_63_bytes(i8* nocapture %a) {
 ; X32-LABEL: big_nonzero_63_bytes:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps {{.*#+}} ymm0 = [1,0,2,0,3,0,4,0]
 ; X32-NEXT:    vmovups %ymm0, (%eax)
@@ -107,7 +107,7 @@ define void @big_nonzero_63_bytes(i8* no
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: big_nonzero_63_bytes:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps {{.*#+}} ymm0 = [1,2,3,4]
 ; X64-NEXT:    vmovups %ymm0, (%rdi)
 ; X64-NEXT:    movq $5, 32(%rdi)

Modified: llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-store-partially-alias-loads.ll Mon Dec  4 09:18:51 2017
@@ -13,7 +13,7 @@
 ; X86-NEXT: movb [[HI1]], 3([[BASEREG]])
 ; X86-NEXT: retq
 
-; DBGDAG-LABEL: Optimized legalized selection DAG: BB#0 'merge_store_partial_overlap_load:'
+; DBGDAG-LABEL: Optimized legalized selection DAG: %bb.0 'merge_store_partial_overlap_load:'
 ; DBGDAG: [[ENTRYTOKEN:t[0-9]+]]: ch = EntryToken
 ; DBGDAG-DAG: [[BASEPTR:t[0-9]+]]: i64,ch = CopyFromReg [[ENTRYTOKEN]],
 ; DBGDAG-DAG: [[ADDPTR:t[0-9]+]]: i64 = add {{(nuw )?}}[[BASEPTR]], Constant:i64<2>

Modified: llvm/trunk/test/CodeGen/X86/merge_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge_store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge_store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge_store.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @merge_store(i32* nocapture %a) {
 ; CHECK-LABEL: merge_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movabsq $4294967297, %rcx # imm = 0x100000001
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -14,7 +14,7 @@ define void @merge_store(i32* nocapture
 ; CHECK-NEXT:    addq $4, %rax
 ; CHECK-NEXT:    cmpl $1000, %eax # imm = 0x3E8
 ; CHECK-NEXT:    jl .LBB0_1
-; CHECK-NEXT:  # BB#2: # %for.end
+; CHECK-NEXT:  # %bb.2: # %for.end
 ; CHECK-NEXT:    retq
 entry:
   br label %for.body
@@ -43,7 +43,7 @@ entry:
 
 define void @indexed_store_merge(i64 %p, i8* %v) {
 ; CHECK-LABEL: indexed_store_merge:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $0, 2(%rsi,%rdi)
 ; CHECK-NEXT:    movb $0, (%rsi)
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/merge_store_duplicated_loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge_store_duplicated_loads.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge_store_duplicated_loads.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge_store_duplicated_loads.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gn
 
 define void @merge_double(double* noalias nocapture %st, double* noalias nocapture readonly %ld) #0 {
 ; CHECK-LABEL: merge_double:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT:    movsd %xmm0, (%rdi)
@@ -31,7 +31,7 @@ define void @merge_double(double* noalia
 
 define void @merge_loadstore_int(i64* noalias nocapture readonly %p, i64* noalias nocapture %q) local_unnamed_addr #0 {
 ; CHECK-LABEL: merge_loadstore_int:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rcx
 ; CHECK-NEXT:    movq %rax, (%rsi)
@@ -55,7 +55,7 @@ entry:
 
 define i64 @merge_loadstore_int_with_extra_use(i64* noalias nocapture readonly %p, i64* noalias nocapture %q) local_unnamed_addr #0 {
 ; CHECK-LABEL: merge_loadstore_int_with_extra_use:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rcx
 ; CHECK-NEXT:    movq %rax, (%rsi)

Modified: llvm/trunk/test/CodeGen/X86/mfence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mfence.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mfence.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mfence.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define void @test() {
 ; X32-LABEL: test:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    mfence
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mfence
 ; X64-NEXT:    retq
   fence seq_cst
@@ -20,14 +20,14 @@ define void @test() {
 
 define i32 @fence(i32* %ptr) {
 ; X32-LABEL: fence:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    mfence
 ; X32-NEXT:    movl (%eax), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: fence:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    mfence
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/misched-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-copy.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-copy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-copy.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 ; MUL_HiLo PhysReg use copies should be just above the mul.
 ; MUL_HiLo PhysReg def copies should be just below the mul.
 ;
-; CHECK: *** Final schedule for BB#1 ***
+; CHECK: *** Final schedule for %bb.1 ***
 ; CHECK:      %eax<def> = COPY
 ; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
 ; CHECK-NEXT: COPY %e{{[ad]}}x

Modified: llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define void @t3() nounwind  {
 ; X86-64-LABEL: t3:
-; X86-64:       ## BB#0:
+; X86-64:       ## %bb.0:
 ; X86-64-NEXT:    movq _g_v8qi@{{.*}}(%rip), %rax
 ; X86-64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-64-NEXT:    movb $1, %al
@@ -21,7 +21,7 @@ define void @t3() nounwind  {
 
 define void @t4(x86_mmx %v1, x86_mmx %v2) nounwind  {
 ; X86-64-LABEL: t4:
-; X86-64:       ## BB#0:
+; X86-64:       ## %bb.0:
 ; X86-64-NEXT:    movdq2q %xmm1, %mm0
 ; X86-64-NEXT:    movq %mm0, -{{[0-9]+}}(%rsp)
 ; X86-64-NEXT:    movdq2q %xmm0, %mm0
@@ -41,7 +41,7 @@ define void @t4(x86_mmx %v1, x86_mmx %v2
 
 define void @t5() nounwind  {
 ; X86-64-LABEL: t5:
-; X86-64:       ## BB#0:
+; X86-64:       ## %bb.0:
 ; X86-64-NEXT:    pushq %rax
 ; X86-64-NEXT:    xorl %edi, %edi
 ; X86-64-NEXT:    callq _pass_v1di

Modified: llvm/trunk/test/CodeGen/X86/mmx-arg-passing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-arg-passing.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-arg-passing.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-arg-passing.ll Mon Dec  4 09:18:51 2017
@@ -12,13 +12,13 @@
 
 define void @t1(x86_mmx %v1) nounwind  {
 ; X86-32-LABEL: t1:
-; X86-32:       ## BB#0:
+; X86-32:       ## %bb.0:
 ; X86-32-NEXT:    movl L_u1$non_lazy_ptr, %eax
 ; X86-32-NEXT:    movq %mm0, (%eax)
 ; X86-32-NEXT:    retl
 ;
 ; X86-64-LABEL: t1:
-; X86-64:       ## BB#0:
+; X86-64:       ## %bb.0:
 ; X86-64-NEXT:    movdq2q %xmm0, %mm0
 ; X86-64-NEXT:    movq _u1@{{.*}}(%rip), %rax
 ; X86-64-NEXT:    movq %mm0, (%rax)
@@ -31,7 +31,7 @@ define void @t1(x86_mmx %v1) nounwind  {
 
 define void @t2(<1 x i64> %v1) nounwind  {
 ; X86-32-LABEL: t2:
-; X86-32:       ## BB#0:
+; X86-32:       ## %bb.0:
 ; X86-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-32-NEXT:    movl L_u2$non_lazy_ptr, %edx
@@ -40,7 +40,7 @@ define void @t2(<1 x i64> %v1) nounwind
 ; X86-32-NEXT:    retl
 ;
 ; X86-64-LABEL: t2:
-; X86-64:       ## BB#0:
+; X86-64:       ## %bb.0:
 ; X86-64-NEXT:    movq _u2@{{.*}}(%rip), %rax
 ; X86-64-NEXT:    movq %rdi, (%rax)
 ; X86-64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i64 @t0(x86_mmx* %p) {
 ; CHECK-LABEL: t0:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %mm0
 ; CHECK-NEXT:    paddq %mm0, %mm0
 ; CHECK-NEXT:    movd %mm0, %rax
@@ -16,7 +16,7 @@ define i64 @t0(x86_mmx* %p) {
 
 define i64 @t1(x86_mmx* %p) {
 ; CHECK-LABEL: t1:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %mm0
 ; CHECK-NEXT:    paddd %mm0, %mm0
 ; CHECK-NEXT:    movd %mm0, %rax
@@ -29,7 +29,7 @@ define i64 @t1(x86_mmx* %p) {
 
 define i64 @t2(x86_mmx* %p) {
 ; CHECK-LABEL: t2:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %mm0
 ; CHECK-NEXT:    paddw %mm0, %mm0
 ; CHECK-NEXT:    movd %mm0, %rax
@@ -42,7 +42,7 @@ define i64 @t2(x86_mmx* %p) {
 
 define i64 @t3(x86_mmx* %p) {
 ; CHECK-LABEL: t3:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %mm0
 ; CHECK-NEXT:    paddb %mm0, %mm0
 ; CHECK-NEXT:    movd %mm0, %rax
@@ -57,7 +57,7 @@ define i64 @t3(x86_mmx* %p) {
 
 define void @t4(<1 x i64> %A, <1 x i64> %B) {
 ; CHECK-LABEL: t4:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movd %rdi, %mm0
 ; CHECK-NEXT:    movd %rsi, %mm1
 ; CHECK-NEXT:    paddusw %mm0, %mm1
@@ -76,7 +76,7 @@ entry:
 
 define i64 @t5(i32 %a, i32 %b) nounwind readnone {
 ; CHECK-LABEL: t5:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movd %esi, %xmm0
 ; CHECK-NEXT:    movd %edi, %xmm1
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -92,7 +92,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.q(x8
 
 define <1 x i64> @t6(i64 %t) {
 ; CHECK-LABEL: t6:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movd %rdi, %mm0
 ; CHECK-NEXT:    psllq $48, %mm0
 ; CHECK-NEXT:    movd %mm0, %rax

Modified: llvm/trunk/test/CodeGen/X86/mmx-coalescing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-coalescing.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-coalescing.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-coalescing.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 define i32 @test(%SA* %pSA, i16* %A, i32 %B, i32 %C, i32 %D, i8* %E) {
 entry:
 ; CHECK-LABEL: test
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:  pshufw
 ; CHECK-NEXT:  movd
 ; CHECK-NOT:  movd

Modified: llvm/trunk/test/CodeGen/X86/mmx-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-cvt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-cvt.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @cvt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
 ; X86-LABEL: cvt_v2f64_v2i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -25,7 +25,7 @@ define void @cvt_v2f64_v2i32(<2 x double
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: cvt_v2f64_v2i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtpd2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -43,7 +43,7 @@ define void @cvt_v2f64_v2i32(<2 x double
 
 define void @cvtt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
 ; X86-LABEL: cvtt_v2f64_v2i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -61,7 +61,7 @@ define void @cvtt_v2f64_v2i32(<2 x doubl
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: cvtt_v2f64_v2i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttpd2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -79,7 +79,7 @@ define void @cvtt_v2f64_v2i32(<2 x doubl
 
 define void @fptosi_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
 ; X86-LABEL: fptosi_v2f64_v2i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -97,7 +97,7 @@ define void @fptosi_v2f64_v2i32(<2 x dou
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fptosi_v2f64_v2i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttpd2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -113,7 +113,7 @@ define void @fptosi_v2f64_v2i32(<2 x dou
 
 define void @cvt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
 ; X86-LABEL: cvt_v2f32_v2i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -131,7 +131,7 @@ define void @cvt_v2f32_v2i32(<4 x float>
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: cvt_v2f32_v2i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvtps2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -149,7 +149,7 @@ define void @cvt_v2f32_v2i32(<4 x float>
 
 define void @cvtt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
 ; X86-LABEL: cvtt_v2f32_v2i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -167,7 +167,7 @@ define void @cvtt_v2f32_v2i32(<4 x float
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: cvtt_v2f32_v2i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttps2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -185,7 +185,7 @@ define void @cvtt_v2f32_v2i32(<4 x float
 
 define void @fptosi_v4f32_v4i32(<4 x float>, <1 x i64>*) nounwind {
 ; X86-LABEL: fptosi_v4f32_v4i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -203,7 +203,7 @@ define void @fptosi_v4f32_v4i32(<4 x flo
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fptosi_v4f32_v4i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttps2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -220,7 +220,7 @@ define void @fptosi_v4f32_v4i32(<4 x flo
 
 define void @fptosi_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
 ; X86-LABEL: fptosi_v2f32_v2i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -238,7 +238,7 @@ define void @fptosi_v2f32_v2i32(<4 x flo
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fptosi_v2f32_v2i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cvttps2pi %xmm0, %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, (%rdi)
@@ -259,7 +259,7 @@ define void @fptosi_v2f32_v2i32(<4 x flo
 
 define <2 x double> @sitofp_v2i32_v2f64(<1 x i64>*) nounwind {
 ; X86-LABEL: sitofp_v2i32_v2f64:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -274,7 +274,7 @@ define <2 x double> @sitofp_v2i32_v2f64(
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_v2i32_v2f64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq2dq %mm0, %xmm0
@@ -293,7 +293,7 @@ define <2 x double> @sitofp_v2i32_v2f64(
 
 define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
 ; X86-LABEL: sitofp_v2i32_v2f32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -309,7 +309,7 @@ define <4 x float> @sitofp_v2i32_v2f32(<
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_v2i32_v2f32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq %mm0, -{{[0-9]+}}(%rsp)
@@ -327,7 +327,7 @@ define <4 x float> @sitofp_v2i32_v2f32(<
 
 define <4 x float> @cvt_v2i32_v2f32(<1 x i64>*) nounwind {
 ; X86-LABEL: cvt_v2i32_v2f32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -343,7 +343,7 @@ define <4 x float> @cvt_v2i32_v2f32(<1 x
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: cvt_v2i32_v2f32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movd %mm0, %rax

Modified: llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i64 @t0(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t0:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -22,7 +22,7 @@ define i64 @t0(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t0:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psllq %mm1, %mm0
@@ -40,7 +40,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.q(x8
 
 define i64 @t1(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t1:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -58,7 +58,7 @@ define i64 @t1(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psrlq %mm1, %mm0
@@ -76,7 +76,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.q(x8
 
 define i64 @t2(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -94,7 +94,7 @@ define i64 @t2(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psllw %mm1, %mm0
@@ -112,7 +112,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.w(x8
 
 define i64 @t3(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t3:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -130,7 +130,7 @@ define i64 @t3(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psrlw %mm1, %mm0
@@ -148,7 +148,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.w(x8
 
 define i64 @t4(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t4:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -166,7 +166,7 @@ define i64 @t4(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    pslld %mm1, %mm0
@@ -184,7 +184,7 @@ declare x86_mmx @llvm.x86.mmx.pslli.d(x8
 
 define i64 @t5(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t5:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -202,7 +202,7 @@ define i64 @t5(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t5:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psrld %mm1, %mm0
@@ -220,7 +220,7 @@ declare x86_mmx @llvm.x86.mmx.psrli.d(x8
 
 define i64 @t6(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t6:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -238,7 +238,7 @@ define i64 @t6(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t6:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psraw %mm1, %mm0
@@ -256,7 +256,7 @@ declare x86_mmx @llvm.x86.mmx.psrai.w(x8
 
 define i64 @t7(<1 x i64>* %a, i32* %b) nounwind {
 ; X86-LABEL: t7:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -274,7 +274,7 @@ define i64 @t7(<1 x i64>* %a, i32* %b) n
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t7:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    movd (%rsi), %mm1
 ; X64-NEXT:    psrad %mm1, %mm0
@@ -292,7 +292,7 @@ declare x86_mmx @llvm.x86.mmx.psrai.d(x8
 
 define i64 @tt0(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt0:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -308,7 +308,7 @@ define i64 @tt0(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt0:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddb (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -325,7 +325,7 @@ declare void @llvm.x86.mmx.emms()
 
 define i64 @tt1(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt1:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -341,7 +341,7 @@ define i64 @tt1(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddw (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -357,7 +357,7 @@ declare x86_mmx @llvm.x86.mmx.padd.w(x86
 
 define i64 @tt2(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt2:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -373,7 +373,7 @@ define i64 @tt2(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddd (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -389,7 +389,7 @@ declare x86_mmx @llvm.x86.mmx.padd.d(x86
 
 define i64 @tt3(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt3:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -405,7 +405,7 @@ define i64 @tt3(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt3:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddq (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -421,7 +421,7 @@ declare x86_mmx @llvm.x86.mmx.padd.q(x86
 
 define i64 @tt4(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt4:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -437,7 +437,7 @@ define i64 @tt4(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt4:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddusb (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -453,7 +453,7 @@ declare x86_mmx @llvm.x86.mmx.paddus.b(x
 
 define i64 @tt5(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt5:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -469,7 +469,7 @@ define i64 @tt5(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt5:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    paddusw (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -485,7 +485,7 @@ declare x86_mmx @llvm.x86.mmx.paddus.w(x
 
 define i64 @tt6(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt6:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -501,7 +501,7 @@ define i64 @tt6(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt6:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrlw (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -517,7 +517,7 @@ declare x86_mmx @llvm.x86.mmx.psrl.w(x86
 
 define i64 @tt7(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt7:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -533,7 +533,7 @@ define i64 @tt7(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt7:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrld (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -549,7 +549,7 @@ declare x86_mmx @llvm.x86.mmx.psrl.d(x86
 
 define i64 @tt8(x86_mmx %t, x86_mmx* %q) nounwind {
 ; X86-LABEL: tt8:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -565,7 +565,7 @@ define i64 @tt8(x86_mmx %t, x86_mmx* %q)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: tt8:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    psrlq (%rdi), %mm0
 ; X64-NEXT:    movd %mm0, %rax
 ; X64-NEXT:    emms
@@ -581,7 +581,7 @@ declare x86_mmx @llvm.x86.mmx.psrl.q(x86
 
 define void @test_psrlq_by_volatile_shift_amount(x86_mmx* %t) nounwind {
 ; X86-LABEL: test_psrlq_by_volatile_shift_amount:
-; X86:       # BB#0: # %entry
+; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
@@ -599,7 +599,7 @@ define void @test_psrlq_by_volatile_shif
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_psrlq_by_volatile_shift_amount:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl $1, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movd -{{[0-9]+}}(%rsp), %mm0
 ; X64-NEXT:    movl $255, %eax

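For out-of-tree tests that still expect the old spelling: the block comment
printed by the AsmPrinter is now "# %bb.N:" rather than "# BB#N:", which is
all that changes in the FileCheck lines above. Below is a minimal sketch of a
test written against the new format (a hypothetical example for illustration,
not part of this commit; the triple and function name are assumptions):

  ; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s

  define i32 @sample() nounwind {
  ; CHECK-LABEL: sample:
  ; CHECK:       # %bb.0:
  ; CHECK-NEXT:    xorl %eax, %eax
  ; CHECK-NEXT:    retq
    ret i32 0
  }

Tests with autogenerated assertions, like the ones in this patch, can be
refreshed mechanically against a built llc with
utils/update_llc_test_checks.py rather than editing the CHECK lines by hand.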