[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits llvm-commits@lists.llvm.org
Mon Dec 4 09:18:56 PST 2017
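
This commit changes the comment the AsmPrinter emits for each machine basic block from the old "# BB#N:" spelling to "# %bb.N:", so the assembly debug output uses the same block-reference syntax as MIR. The test changes below are a mechanical update of the FileCheck expectations. As a minimal sketch, an updated test reads like this (hypothetical function and RUN line, assuming a powerpc64le triple; not taken from this commit):

; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
define i64 @sample(i64 %a) {
; CHECK-LABEL: sample:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    blr
entry:
  ret i64 %a
}

Only the block comment changes; the instruction checks themselves are untouched.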


Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llgtus(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llgtus:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r4, r3
 ; CHECK-NEXT:    rldicl r3, [[REG]], 1, 63
 ; CHECK-NEXT:    blr
@@ -23,7 +23,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llgtus_sext(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llgtus_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r4, r3
 ; CHECK-NEXT:    sradi r3, [[REG]], 63
 ; CHECK-NEXT:    blr
@@ -36,7 +36,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llgtus_z(i16 zeroext %a) {
 ; CHECK-LABEL: test_llgtus_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cntlzw r3, r3
 ; CHECK-NEXT:    srwi r3, r3, 5
 ; CHECK-NEXT:    xori r3, r3, 1
@@ -50,7 +50,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llgtus_sext_z(i16 zeroext %a) {
 ; CHECK-LABEL: test_llgtus_sext_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cntlzw r3, r3
 ; CHECK-NEXT:    srwi r3, r3, 5
 ; CHECK-NEXT:    xori r3, r3, 1
@@ -65,7 +65,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llgtus_store(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llgtus_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sub [[REG:r[0-9]+]], r4, r3
 ; CHECK:         rldicl {{r[0-9]+}}, [[REG]], 1, 63
 entry:
@@ -78,7 +78,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llgtus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llgtus_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sub [[REG:r[0-9]+]], r4, r3
 ; CHECK:         sradi {{r[0-9]+}}, [[REG]], 63
 entry:
@@ -91,7 +91,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llgtus_z_store(i16 zeroext %a) {
 ; CHECK-LABEL: test_llgtus_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    cntlzw r3, r3
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
@@ -109,7 +109,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llgtus_sext_z_store(i16 zeroext %a) {
 ; CHECK-LABEL: test_llgtus_sext_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    cntlzw r3, r3
 ; CHECK-NEXT:    srwi r3, r3, 5

Modified: llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define i64 @test_lllesc(i8 signext %a, i8 signext %b) {
 ; CHECK-LABEL: test_lllesc:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
 ; CHECK-NEXT:    xori r3, r3, 1
@@ -23,7 +23,7 @@ entry:
 
 define i64 @test_lllesc_sext(i8 signext %a, i8 signext %b) {
 ; CHECK-LABEL: test_lllesc_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
 ; CHECK-NEXT:    addi r3, r3, -1
@@ -36,7 +36,7 @@ entry:
 
 define void @test_lllesc_store(i8 signext %a, i8 signext %b) {
 ; CHECK-LABEL: test_lllesc_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -53,7 +53,7 @@ entry:
 
 define void @test_lllesc_sext_store(i8 signext %a, i8 signext %b) {
 ; CHECK-LABEL: test_lllesc_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)

Modified: llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define i64 @test_lllesi(i32 signext %a, i32 signext %b)  {
 ; CHECK-LABEL: test_lllesi:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
 ; CHECK-NEXT:    xori r3, r3, 1
@@ -23,7 +23,7 @@ entry:
 
 define i64 @test_lllesi_sext(i32 signext %a, i32 signext %b)  {
 ; CHECK-LABEL: test_lllesi_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
 ; CHECK-NEXT:    addi r3, r3, -1
@@ -36,7 +36,7 @@ entry:
 
 define void @test_lllesi_store(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: test_lllesi_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -53,7 +53,7 @@ entry:
 
 define void @test_lllesi_sext_store(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: test_lllesi_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)

Modified: llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_lllesll(i64 %a, i64 %b)  {
 ; CHECK-LABEL: test_lllesll:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sradi r5, r4, 63
 ; CHECK-NEXT:    rldicl r6, r3, 1, 63
 ; CHECK-NEXT:    subfc r12, r3, r4
@@ -25,7 +25,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_lllesll_sext(i64 %a, i64 %b)  {
 ; CHECK-LABEL: test_lllesll_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sradi r5, r4, 63
 ; CHECK-NEXT:    rldicl r6, r3, 1, 63
 ; CHECK-NEXT:    subfc r12, r3, r4
@@ -41,7 +41,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_lllesll_z(i64 %a)  {
 ; CHECK-LABEL: test_lllesll_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r4, r3, -1
 ; CHECK-NEXT:    or r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
@@ -55,7 +55,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_lllesll_sext_z(i64 %a)  {
 ; CHECK-LABEL: test_lllesll_sext_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi r4, r3, -1
 ; CHECK-NEXT:    or r3, r4, r3
 ; CHECK-NEXT:    sradi r3, r3, 63
@@ -69,7 +69,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_lllesll_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_lllesll_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:    sradi r6, r4, 63
 ; CHECK:    subfc r4, r3, r4
 ; CHECK:    rldicl r3, r3, 1, 63
@@ -86,7 +86,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_lllesll_sext_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_lllesll_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:    sradi r6, r4, 63
 ; CHECK-DAG:    rldicl r3, r3, 1, 63
 ; CHECK-DAG:    subfc r4, r3, r4
@@ -104,7 +104,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_lllesll_z_store(i64 %a) {
 ; CHECK-LABEL: test_lllesll_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    addi r5, r3, -1
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
@@ -122,7 +122,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_lllesll_sext_z_store(i64 %a) {
 ; CHECK-LABEL: test_lllesll_sext_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    addi r5, r3, -1
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define i64 @test_llless(i16 signext %a, i16 signext %b)  {
 ; CHECK-LABEL: test_llless:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
 ; CHECK-NEXT:    xori r3, r3, 1
@@ -23,7 +23,7 @@ entry:
 
 define i64 @test_llless_sext(i16 signext %a, i16 signext %b)  {
 ; CHECK-LABEL: test_llless_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    rldicl r3, r3, 1, 63
 ; CHECK-NEXT:    addi r3, r3, -1
@@ -36,7 +36,7 @@ entry:
 
 define void @test_llless_store(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: test_llless_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -53,7 +53,7 @@ entry:
 
 define void @test_llless_sext_store(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: test_llless_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    sub r3, r4, r3
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltsll(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llltsll:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sradi [[REG1:r[0-9]+]], r3, 63
 ; CHECK-NEXT:    rldicl [[REG2:r[0-9]+]], r4, 1, 63
 ; CHECK-NEXT:    subfc [[REG3:r[0-9]+]], r4, r3
@@ -27,7 +27,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltsll_sext(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llltsll_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sradi [[REG1:r[0-9]+]], r3, 63
 ; CHECK-NEXT:    rldicl [[REG2:r[0-9]+]], r4, 1, 63
 ; CHECK-NEXT:    subfc [[REG3:r[0-9]+]], r4, r3
@@ -44,7 +44,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltsll_sext_z(i64 %a) {
 ; CHECK-LABEL: test_llltsll_sext_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sradi r3, r3, 63
 ; CHECK-NEXT:    blr
 entry:
@@ -56,7 +56,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltsll_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llltsll_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sradi [[REG1:r[0-9]+]], r3, 63
 ; CHECK:         rldicl [[REG2:r[0-9]+]], r4, 1, 63
 ; CHECK-DIAG:    subfc [[REG3:r[0-9]+]], r4, r3
@@ -73,7 +73,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltsll_sext_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llltsll_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sradi [[REG1:r[0-9]+]], r3, 63
 ; CHECK:         rldicl [[REG2:r[0-9]+]], r4, 1, 63
 ; CHECK-DIAG:    subfc [[REG3:r[0-9]+]], r4, r3

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltuc(i8 zeroext %a, i8 zeroext %b) {
 ; CHECK-LABEL: test_llltuc:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r3, r4
 ; CHECK-NEXT:    rldicl r3, [[REG]], 1, 63
 ; CHECK-NEXT:    blr
@@ -23,7 +23,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltuc_sext(i8 zeroext %a, i8 zeroext %b) {
 ; CHECK-LABEL: test_llltuc_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r3, r4
 ; CHECK-NEXT:    sradi r3, [[REG]], 63
 ; CHECK-NEXT:    blr
@@ -36,7 +36,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltuc_store(i8 zeroext %a, i8 zeroext %b) {
 ; CHECK-LABEL: test_llltuc_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sub [[REG:r[2-9]+]], r3, r4
 ; CHECK:         rldicl {{r[0-9]+}}, [[REG]], 1, 63
 entry:
@@ -49,7 +49,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltuc_sext_store(i8 zeroext %a, i8 zeroext %b) {
 ; CHECK-LABEL: test_llltuc_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sub [[REG:r[0-9]+]], r3, r4
 ; CHECK:         sradi {{r[0-9]+}}, [[REG]], 63
 entry:

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltui(i32 zeroext %a, i32 zeroext %b) {
 ; CHECK-LABEL: test_llltui:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NOT:     clrldi
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r3, r4
 ; CHECK-NEXT:    rldicl r3, [[REG]], 1, 63
@@ -24,7 +24,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltui_sext(i32 zeroext %a, i32 zeroext %b) {
 ; CHECK-LABEL: test_llltui_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r3, r4
 ; CHECK-NEXT:    sradi r3, [[REG]], 63
 ; CHECK-NEXT:    blr
@@ -37,7 +37,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltui_z(i32 zeroext %a) {
 ; CHECK-LABEL: test_llltui_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r3, 0
 ; CHECK-NEXT:    blr
 entry:
@@ -47,7 +47,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltui_sext_z(i32 zeroext %a) {
 ; CHECK-LABEL: test_llltui_sext_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li r3, 0
 ; CHECK-NEXT:    blr
 entry:
@@ -57,7 +57,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltui_store(i32 zeroext %a, i32 zeroext %b) {
 ; CHECK-LABEL: test_llltui_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NOT:     clrldi
 ; CHECK:         sub [[REG:r[2-9]+]], r3, r4
 ; CHECK:         rldicl {{r[0-9]+}}, [[REG]], 1, 63
@@ -71,7 +71,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltui_sext_store(i32 zeroext %a, i32 zeroext %b) {
 ; CHECK-LABEL: test_llltui_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NOT:     clrldi
 ; CHECK:         sub [[REG:r[0-9]+]], r3, r4
 ; CHECK:         sradi {{r[0-9]+}}, [[REG]], 63
@@ -85,7 +85,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltui_z_store(i32 zeroext %a) {
 ; CHECK-LABEL: test_llltui_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         li [[REG:r[0-9]+]], 0
 ; CHECK:         stw [[REG]], 0(r3)
 ; CHECK-NEXT:    blr
@@ -97,7 +97,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltui_sext_z_store(i32 zeroext %a) {
 ; CHECK-LABEL: test_llltui_sext_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         li [[REG:r[0-9]+]], 0
 ; CHECK:         stw [[REG]], 0(r3)
 ; CHECK-NEXT:    blr

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltus(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llltus:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r3, r4
 ; CHECK-NEXT:    rldicl r3, [[REG]], 1, 63
 ; CHECK-NEXT:    blr
@@ -23,7 +23,7 @@ entry:
 ; Function Attrs: norecurse nounwind readnone
 define i64 @test_llltus_sext(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llltus_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    sub [[REG:r[0-9]+]], r3, r4
 ; CHECK-NEXT:    sradi r3, [[REG]], 63
 ; CHECK-NEXT:    blr
@@ -48,7 +48,7 @@ entry:
 ; Function Attrs: norecurse nounwind
 define void @test_llltus_sext_store(i16 zeroext %a, i16 zeroext %b) {
 ; CHECK-LABEL: test_llltus_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK:         sub [[REG:r[0-9]+]], r3, r4
 ; CHECK:         sradi {{r[0-9]+}}, [[REG]], 63
 entry:

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define i64 @test_llnesll(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llnesll:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    addic r4, r3, -1
 ; CHECK-NEXT:    subfe r3, r4, r3
@@ -23,7 +23,7 @@ entry:
 
 define i64 @test_llnesll_sext(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llnesll_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    subfic r3, r3, 0
 ; CHECK-NEXT:    subfe r3, r3, r3
@@ -36,7 +36,7 @@ entry:
 
 define i64 @test_llnesll_z(i64 %a) {
 ; CHECK-LABEL: test_llnesll_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addic r4, r3, -1
 ; CHECK-NEXT:    subfe r3, r4, r3
 ; CHECK-NEXT:    blr
@@ -48,7 +48,7 @@ entry:
 
 define i64 @test_llnesll_sext_z(i64 %a) {
 ; CHECK-LABEL: test_llnesll_sext_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subfic r3, r3, 0
 ; CHECK-NEXT:    subfe r3, r3, r3
 ; CHECK-NEXT:    blr
@@ -60,7 +60,7 @@ entry:
 
 define void @test_llnesll_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llnesll_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -77,7 +77,7 @@ entry:
 
 define void @test_llnesll_sext_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llnesll_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -94,7 +94,7 @@ entry:
 
 define void @test_llnesll_z_store(i64 %a) {
 ; CHECK-LABEL: test_llnesll_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    addic r5, r3, -1
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
@@ -110,7 +110,7 @@ entry:
 
 define void @test_llnesll_sext_z_store(i64 %a) {
 ; CHECK-LABEL: test_llnesll_sext_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    subfic r3, r3, 0
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)

Modified: llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define i64 @test_llneull(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llneull:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    addic r4, r3, -1
 ; CHECK-NEXT:    subfe r3, r4, r3
@@ -23,7 +23,7 @@ entry:
 
 define i64 @test_llneull_sext(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llneull_sext:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    subfic r3, r3, 0
 ; CHECK-NEXT:    subfe r3, r3, r3
@@ -36,7 +36,7 @@ entry:
 
 define i64 @test_llneull_z(i64 %a) {
 ; CHECK-LABEL: test_llneull_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addic r4, r3, -1
 ; CHECK-NEXT:    subfe r3, r4, r3
 ; CHECK-NEXT:    blr
@@ -48,7 +48,7 @@ entry:
 
 define i64 @test_llneull_sext_z(i64 %a) {
 ; CHECK-LABEL: test_llneull_sext_z:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    subfic r3, r3, 0
 ; CHECK-NEXT:    subfe r3, r3, r3
 ; CHECK-NEXT:    blr
@@ -60,7 +60,7 @@ entry:
 
 define void @test_llneull_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llneull_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -77,7 +77,7 @@ entry:
 
 define void @test_llneull_sext_store(i64 %a, i64 %b) {
 ; CHECK-LABEL: test_llneull_sext_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r5, r2, .LC0@toc@ha
 ; CHECK-NEXT:    xor r3, r3, r4
 ; CHECK-NEXT:    ld r12, .LC0@toc@l(r5)
@@ -94,7 +94,7 @@ entry:
 
 define void @test_llneull_z_store(i64 %a) {
 ; CHECK-LABEL: test_llneull_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    addic r5, r3, -1
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)
@@ -110,7 +110,7 @@ entry:
 
 define void @test_llneull_sext_z_store(i64 %a) {
 ; CHECK-LABEL: test_llneull_sext_z_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addis r4, r2, .LC0@toc@ha
 ; CHECK-NEXT:    subfic r3, r3, 0
 ; CHECK-NEXT:    ld r4, .LC0@toc@l(r4)

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ define <1 x i128> @out_of_bounds_inserte
        %result = add <1 x i128> %x, %tmpvec
        ret <1 x i128> %result
 ; CHECK-LABEL: @out_of_bounds_insertelement
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
 ; CHECK-NEXT: blr
 }
 

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define zeroext i8 @test1(<16 x i8> %a, i32 signext %index) {
 ; CHECK-LE-LABEL: test1:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextubrx 3, 5, 2
 ; CHECK-LE-NEXT:    clrldi 3, 3, 56
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test1:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextublx 3, 5, 2
 ; CHECK-BE-NEXT:    clrldi 3, 3, 56
 ; CHECK-BE-NEXT:    blr
@@ -21,12 +21,12 @@ entry:
 
 define signext i8 @test2(<16 x i8> %a, i32 signext %index) {
 ; CHECK-LE-LABEL: test2:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextubrx 3, 5, 2
 ; CHECK-LE-NEXT:    extsb 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test2:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextublx 3, 5, 2
 ; CHECK-BE-NEXT:    extsb 3, 3
 ; CHECK-BE-NEXT:    blr
@@ -38,13 +38,13 @@ entry:
 
 define zeroext i16 @test3(<8 x i16> %a, i32 signext %index) {
 ; CHECK-LE-LABEL: test3:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-LE-NEXT:    vextuhrx 3, 3, 2
 ; CHECK-LE-NEXT:    clrldi 3, 3, 48
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test3:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-BE-NEXT:    vextuhlx 3, 3, 2
 ; CHECK-BE-NEXT:    clrldi 3, 3, 48
@@ -57,13 +57,13 @@ entry:
 
 define signext i16 @test4(<8 x i16> %a, i32 signext %index) {
 ; CHECK-LE-LABEL: test4:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-LE-NEXT:    vextuhrx 3, 3, 2
 ; CHECK-LE-NEXT:    extsh 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test4:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-BE-NEXT:    vextuhlx 3, 3, 2
 ; CHECK-BE-NEXT:    extsh 3, 3
@@ -76,12 +76,12 @@ entry:
 
 define zeroext i32 @test5(<4 x i32> %a, i32 signext %index) {
 ; CHECK-LE-LABEL: test5:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test5:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    blr
@@ -93,13 +93,13 @@ entry:
 
 define signext i32 @test6(<4 x i32> %a, i32 signext %index) {
 ; CHECK-LE-LABEL: test6:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    extsw 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test6:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    extsw 3, 3
@@ -113,13 +113,13 @@ entry:
 ; Test with immediate index
 define zeroext i8 @test7(<16 x i8> %a) {
 ; CHECK-LE-LABEL: test7:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    li 3, 1
 ; CHECK-LE-NEXT:    vextubrx 3, 3, 2
 ; CHECK-LE-NEXT:    clrldi 3, 3, 56
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test7:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 1
 ; CHECK-BE-NEXT:    vextublx 3, 3, 2
 ; CHECK-BE-NEXT:    clrldi 3, 3, 56
@@ -132,13 +132,13 @@ entry:
 
 define zeroext i16 @test8(<8 x i16> %a) {
 ; CHECK-LE-LABEL: test8:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    li 3, 2
 ; CHECK-LE-NEXT:    vextuhrx 3, 3, 2
 ; CHECK-LE-NEXT:    clrldi 3, 3, 48
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test8:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 2
 ; CHECK-BE-NEXT:    vextuhlx 3, 3, 2
 ; CHECK-BE-NEXT:    clrldi 3, 3, 48
@@ -151,12 +151,12 @@ entry:
 
 define zeroext i32 @test9(<4 x i32> %a) {
 ; CHECK-LE-LABEL: test9:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    li 3, 12
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test9:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 12
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    blr

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define zeroext i8 @test_add1(<16 x i8> %a, i32 signext %index, i8 zeroext %c) {
 ; CHECK-LE-LABEL: test_add1:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextubrx 3, 5, 2
 ; CHECK-LE-NEXT:    add 3, 3, 6
 ; CHECK-LE-NEXT:    rlwinm 3, 3, 0, 24, 31
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test_add1:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextublx 3, 5, 2
 ; CHECK-BE-NEXT:    add 3, 3, 6
 ; CHECK-BE-NEXT:    rlwinm 3, 3, 0, 24, 31
@@ -26,13 +26,13 @@ entry:
 
 define signext i8 @test_add2(<16 x i8> %a, i32 signext %index, i8 signext %c) {
 ; CHECK-LE-LABEL: test_add2:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextubrx 3, 5, 2
 ; CHECK-LE-NEXT:    add 3, 3, 6
 ; CHECK-LE-NEXT:    extsb 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test_add2:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextublx 3, 5, 2
 ; CHECK-BE-NEXT:    add 3, 3, 6
 ; CHECK-BE-NEXT:    extsb 3, 3
@@ -48,14 +48,14 @@ entry:
 
 define zeroext i16 @test_add3(<8 x i16> %a, i32 signext %index, i16 zeroext %c) {
 ; CHECK-LE-LABEL: test_add3:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-LE-NEXT:    vextuhrx 3, 3, 2
 ; CHECK-LE-NEXT:    add 3, 3, 6
 ; CHECK-LE-NEXT:    rlwinm 3, 3, 0, 16, 31
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test_add3:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-BE-NEXT:    vextuhlx 3, 3, 2
 ; CHECK-BE-NEXT:    add 3, 3, 6
@@ -72,14 +72,14 @@ entry:
 
 define signext i16 @test_add4(<8 x i16> %a, i32 signext %index, i16 signext %c) {
 ; CHECK-LE-LABEL: test_add4:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-LE-NEXT:    vextuhrx 3, 3, 2
 ; CHECK-LE-NEXT:    add 3, 3, 6
 ; CHECK-LE-NEXT:    extsh 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test_add4:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 1, 28, 30
 ; CHECK-BE-NEXT:    vextuhlx 3, 3, 2
 ; CHECK-BE-NEXT:    add 3, 3, 6
@@ -96,14 +96,14 @@ entry:
 
 define zeroext i32 @test_add5(<4 x i32> %a, i32 signext %index, i32 zeroext %c) {
 ; CHECK-LE-LABEL: test_add5:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    add 3, 3, 6
 ; CHECK-LE-NEXT:    clrldi 3, 3, 32
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test_add5:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    add 3, 3, 6
@@ -117,14 +117,14 @@ entry:
 
 define signext i32 @test_add6(<4 x i32> %a, i32 signext %index, i32 signext %c) {
 ; CHECK-LE-LABEL: test_add6:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    add 3, 3, 6
 ; CHECK-LE-NEXT:    extsw 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test_add6:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    rlwinm 3, 5, 2, 28, 29
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    add 3, 3, 6
@@ -139,11 +139,11 @@ entry:
 ; When extracting word element 2 on LE, it's better to use mfvsrwz rather than vextuwrx
 define zeroext i32 @test7(<4 x i32> %a) {
 ; CHECK-LE-LABEL: test7:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    mfvsrwz 3, 34
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test7:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 8
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    blr
@@ -154,13 +154,13 @@ entry:
 
 define zeroext i32 @testadd_7(<4 x i32> %a, i32 zeroext %c) {
 ; CHECK-LE-LABEL: testadd_7:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    mfvsrwz 3, 34
 ; CHECK-LE-NEXT:    add 3, 3, 5
 ; CHECK-LE-NEXT:    clrldi 3, 3, 32
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: testadd_7:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 8
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    add 3, 3, 5
@@ -174,12 +174,12 @@ entry:
 
 define signext i32 @test8(<4 x i32> %a) {
 ; CHECK-LE-LABEL: test8:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    mfvsrwz 3, 34
 ; CHECK-LE-NEXT:    extsw 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test8:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 8
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    extsw 3, 3
@@ -191,13 +191,13 @@ entry:
 
 define signext i32 @testadd_8(<4 x i32> %a, i32 signext %c) {
 ; CHECK-LE-LABEL: testadd_8:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    mfvsrwz 3, 34
 ; CHECK-LE-NEXT:    add 3, 3, 5
 ; CHECK-LE-NEXT:    extsw 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: testadd_8:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    li 3, 8
 ; CHECK-BE-NEXT:    vextuwlx 3, 3, 2
 ; CHECK-BE-NEXT:    add 3, 3, 5
@@ -212,13 +212,13 @@ entry:
 ; When extracting word element 1 on BE, it's better to use mfvsrwz rather than vextuwlx
 define signext i32 @test9(<4 x i32> %a) {
 ; CHECK-LE-LABEL: test9:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    li 3, 4
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    extsw 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: test9:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mfvsrwz 3, 34
 ; CHECK-BE-NEXT:    extsw 3, 3
 ; CHECK-BE-NEXT:    blr
@@ -229,14 +229,14 @@ entry:
 
 define signext i32 @testadd_9(<4 x i32> %a, i32 signext %c) {
 ; CHECK-LE-LABEL: testadd_9:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    li 3, 4
 ; CHECK-LE-NEXT:    vextuwrx 3, 3, 2
 ; CHECK-LE-NEXT:    add 3, 3, 5
 ; CHECK-LE-NEXT:    extsw 3, 3
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: testadd_9:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    mfvsrwz 3, 34
 ; CHECK-BE-NEXT:    add 3, 3, 5
 ; CHECK-BE-NEXT:    extsw 3, 3
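
The comments in vec_extract_p9_2.ll above explain the choice of mfvsrwz over vextuwrx/vextuwlx: word element 2 on little-endian (element 1 on big-endian) already sits in the lane that mfvsrwz moves to a GPR, so no index register has to be materialized, and the instruction zero-extends the word. A minimal sketch of the LE case, mirroring test7 above (hypothetical test, assuming a powerpc64le triple with -mcpu=pwr9; not from this commit):

; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
define zeroext i32 @extract_elt2(<4 x i32> %a) {
; CHECK-LABEL: extract_elt2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mfvsrwz 3, 34
; CHECK-NEXT:    blr
entry:
  %vecext = extractelement <4 x i32> %a, i32 2
  ret i32 %vecext
}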

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll Mon Dec  4 09:18:51 2017
@@ -4,11 +4,11 @@
 
 define <4 x i32> @vextsb2wLE(<16 x i8> %a) {
 ; CHECK-LE-LABEL: vextsb2wLE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextsb2w 2, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: vextsb2wLE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE:         vperm 2, 2, 2, 3
 ; CHECK-BE-NEXT:    vextsb2w 2, 2
 ; CHECK-BE-NEXT:    blr
@@ -31,11 +31,11 @@ entry:
 
 define <2 x i64> @vextsb2dLE(<16 x i8> %a) {
 ; CHECK-LE-LABEL: vextsb2dLE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextsb2d 2, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: vextsb2dLE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE:         vperm 2, 2, 2, 3
 ; CHECK-BE-NEXT:    vextsb2d 2, 2
 ; CHECK-BE-NEXT:    blr
@@ -52,11 +52,11 @@ entry:
 
 define <4 x i32> @vextsh2wLE(<8 x i16> %a) {
 ; CHECK-LE-LABEL: vextsh2wLE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextsh2w 2, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: vextsh2wLE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE:         vperm 2, 2, 2, 3
 ; CHECK-BE-NEXT:    vextsh2w 2, 2
 ; CHECK-BE-NEXT:    blr
@@ -79,11 +79,11 @@ entry:
 
 define <2 x i64> @vextsh2dLE(<8 x i16> %a) {
 ; CHECK-LE-LABEL: vextsh2dLE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextsh2d 2, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: vextsh2dLE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE:         vperm 2, 2, 2, 3
 ; CHECK-BE-NEXT:    vextsh2d 2, 2
 ; CHECK-BE-NEXT:    blr
@@ -100,11 +100,11 @@ entry:
 
 define <2 x i64> @vextsw2dLE(<4 x i32> %a) {
 ; CHECK-LE-LABEL: vextsw2dLE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vextsw2d 2, 2
 ; CHECK-LE-NEXT:    blr
 ; CHECK-BE-LABEL: vextsw2dLE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE:         vmrgew
 ; CHECK-BE-NEXT:    vextsw2d 2, 2
 ; CHECK-BE-NEXT:    blr
@@ -121,11 +121,11 @@ entry:
 
 define <4 x i32> @vextsb2wBE(<16 x i8> %a) {
 ; CHECK-BE-LABEL: vextsb2wBE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextsb2w 2, 2
 ; CHECK-BE-NEXT:    blr
 ; CHECK-LE-LABEL: vextsb2wBE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vsldoi 2, 2, 2, 13
 ; CHECK-LE-NEXT:    vextsb2w 2, 2
 ; CHECK-LE-NEXT:    blr
@@ -147,11 +147,11 @@ entry:
 
 define <2 x i64> @vextsb2dBE(<16 x i8> %a) {
 ; CHECK-BE-LABEL: vextsb2dBE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextsb2d 2, 2
 ; CHECK-BE-NEXT:    blr
 ; CHECK-LE-LABEL: vextsb2dBE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vsldoi 2, 2, 2, 9
 ; CHECK-LE-NEXT:    vextsb2d 2, 2
 ; CHECK-LE-NEXT:    blr
@@ -167,11 +167,11 @@ entry:
 
 define <4 x i32> @vextsh2wBE(<8 x i16> %a) {
 ; CHECK-BE-LABEL: vextsh2wBE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextsh2w 2, 2
 ; CHECK-BE-NEXT:    blr
 ; CHECK-LE-LABEL: vextsh2wBE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vsldoi 2, 2, 2, 14
 ; CHECK-LE-NEXT:    vextsh2w 2, 2
 ; CHECK-LE-NEXT:    blr
@@ -193,11 +193,11 @@ entry:
 
 define <2 x i64> @vextsh2dBE(<8 x i16> %a) {
 ; CHECK-BE-LABEL: vextsh2dBE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextsh2d 2, 2
 ; CHECK-BE-NEXT:    blr
 ; CHECK-LE-LABEL: vextsh2dBE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vsldoi 2, 2, 2, 10
 ; CHECK-LE-NEXT:    vextsh2d 2, 2
 ; CHECK-LE-NEXT:    blr
@@ -213,11 +213,11 @@ entry:
 
 define <2 x i64> @vextsw2dBE(<4 x i32> %a) {
 ; CHECK-BE-LABEL: vextsw2dBE:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NEXT:    vextsw2d 2, 2
 ; CHECK-BE-NEXT:    blr
 ; CHECK-LE-LABEL: vextsw2dBE:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NEXT:    vsldoi 2, 2, 2, 12
 ; CHECK-LE-NEXT:    vextsw2d 2, 2
 ; CHECK-LE-NEXT:    blr
@@ -233,11 +233,11 @@ entry:
 
 define <2 x i64> @vextDiffVectors(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LE-LABEL: vextDiffVectors:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NOT:     vextsw2d
 
 ; CHECK-BE-LABEL: vextDiffVectors:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NOT:     vextsw2d
 entry:
   %vecext = extractelement <4 x i32> %a, i32 0
@@ -252,11 +252,11 @@ entry:
 define <8 x i16> @testInvalidExtend(<16 x i8> %a) {
 entry:
 ; CHECK-LE-LABEL: testInvalidExtend:
-; CHECK-LE:       # BB#0: # %entry
+; CHECK-LE:       # %bb.0: # %entry
 ; CHECK-LE-NOT:     vexts
 
 ; CHECK-BE-LABEL: testInvalidExtend:
-; CHECK-BE:       # BB#0: # %entry
+; CHECK-BE:       # %bb.0: # %entry
 ; CHECK-BE-NOT:     vexts
 
   %vecext = extractelement <16 x i8> %a, i32 0

Modified: llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x i16> @testXXBRH(<8 x i16> %a) {
 ; CHECK-LABEL: testXXBRH:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xxbrh 34, 34
 ; CHECK-NEXT:    blr
 
@@ -16,7 +16,7 @@ entry:
 
 define <4 x i32> @testXXBRW(<4 x i32> %a) {
 ; CHECK-LABEL: testXXBRW:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xxbrw 34, 34
 ; CHECK-NEXT:    blr
 
@@ -29,7 +29,7 @@ entry:
 
 define <2 x double> @testXXBRD(<2 x double> %a) {
 ; CHECK-LABEL: testXXBRD:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xxbrd 34, 34
 ; CHECK-NEXT:    blr
 
@@ -42,7 +42,7 @@ entry:
 
 define <1 x i128> @testXXBRQ(<1 x i128> %a) {
 ; CHECK-LABEL: testXXBRQ:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xxbrq 34, 34
 ; CHECK-NEXT:    blr
 

Modified: llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_C1_or_C2_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 15
 ; CHECK-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
@@ -29,7 +29,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x
 
 define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_C1_or_C2_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI1_0@toc@ha
 ; CHECK-NEXT:    addis 4, 2, .LCPI1_1@toc@ha
@@ -46,7 +46,7 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<
 
 define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_Cplus1_or_C_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, 1
 ; CHECK-NEXT:    addis 3, 2, .LCPI2_0@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI2_0@toc@l
@@ -60,7 +60,7 @@ define <4 x i32> @sel_Cplus1_or_C_vec(<4
 
 define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_Cplus1_or_C_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI3_0@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI3_0@toc@l
@@ -74,7 +74,7 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_ve
 
 define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_Cminus1_or_C_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 15
 ; CHECK-NEXT:    addis 3, 2, .LCPI4_0@toc@ha
@@ -91,7 +91,7 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<
 
 define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_Cminus1_or_C_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI5_0@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI5_0@toc@l
@@ -105,7 +105,7 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_v
 
 define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_minus1_or_0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 15
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
@@ -118,7 +118,7 @@ define <4 x i32> @sel_minus1_or_0_vec(<4
 
 define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_minus1_or_0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
@@ -128,7 +128,7 @@ define <4 x i32> @cmp_sel_minus1_or_0_ve
 
 define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_0_or_minus1_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, 1
 ; CHECK-NEXT:    vspltisb 4, -1
 ; CHECK-NEXT:    xxland 34, 34, 35
@@ -140,7 +140,7 @@ define <4 x i32> @sel_0_or_minus1_vec(<4
 
 define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_0_or_minus1_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    xxlnor 34, 34, 34
 ; CHECK-NEXT:    blr
@@ -151,7 +151,7 @@ define <4 x i32> @cmp_sel_0_or_minus1_ve
 
 define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_1_or_0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, 1
 ; CHECK-NEXT:    xxland 34, 34, 35
 ; CHECK-NEXT:    blr
@@ -161,7 +161,7 @@ define <4 x i32> @sel_1_or_0_vec(<4 x i1
 
 define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_1_or_0_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    vspltisw 19, 1
 ; CHECK-NEXT:    xxland 34, 34, 51
@@ -173,7 +173,7 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4
 
 define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_0_or_1_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, 1
 ; CHECK-NEXT:    xxlandc 34, 35, 34
 ; CHECK-NEXT:    blr
@@ -183,7 +183,7 @@ define <4 x i32> @sel_0_or_1_vec(<4 x i1
 
 define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_0_or_1_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    vspltisw 19, 1
 ; CHECK-NEXT:    xxlnor 0, 34, 34

Modified: llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @addc_adde(i64 %a, i64 %b) {
 ; RV32I-LABEL: addc_adde:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a1, a1, a3
 ; RV32I-NEXT:    add a2, a0, a2
 ; RV32I-NEXT:    sltu a0, a2, a0
@@ -19,7 +19,7 @@ define i64 @addc_adde(i64 %a, i64 %b) {
 
 define i64 @subc_sube(i64 %a, i64 %b) {
 ; RV32I-LABEL: subc_sube:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a1, a1, a3
 ; RV32I-NEXT:    sltu a3, a0, a2
 ; RV32I-NEXT:    sub a1, a1, a3

Modified: llvm/trunk/test/CodeGen/RISCV/alu32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/alu32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/alu32.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/alu32.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@
 
 define i32 @addi(i32 %a) nounwind {
 ; RV32I-LABEL: addi:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = add i32 %a, 1
@@ -19,7 +19,7 @@ define i32 @addi(i32 %a) nounwind {
 
 define i32 @slti(i32 %a) nounwind {
 ; RV32I-LABEL: slti:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slti a0, a0, 2
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp slt i32 %a, 2
@@ -29,7 +29,7 @@ define i32 @slti(i32 %a) nounwind {
 
 define i32 @sltiu(i32 %a) nounwind {
 ; RV32I-LABEL: sltiu:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltiu a0, a0, 3
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp ult i32 %a, 3
@@ -39,7 +39,7 @@ define i32 @sltiu(i32 %a) nounwind {
 
 define i32 @xori(i32 %a) nounwind {
 ; RV32I-LABEL: xori:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xori a0, a0, 4
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = xor i32 %a, 4
@@ -48,7 +48,7 @@ define i32 @xori(i32 %a) nounwind {
 
 define i32 @ori(i32 %a) nounwind {
 ; RV32I-LABEL: ori:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ori a0, a0, 5
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = or i32 %a, 5
@@ -57,7 +57,7 @@ define i32 @ori(i32 %a) nounwind {
 
 define i32 @andi(i32 %a) nounwind {
 ; RV32I-LABEL: andi:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 6
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = and i32 %a, 6
@@ -66,7 +66,7 @@ define i32 @andi(i32 %a) nounwind {
 
 define i32 @slli(i32 %a) nounwind {
 ; RV32I-LABEL: slli:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 7
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = shl i32 %a, 7
@@ -75,7 +75,7 @@ define i32 @slli(i32 %a) nounwind {
 
 define i32 @srli(i32 %a) nounwind {
 ; RV32I-LABEL: srli:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 8
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = lshr i32 %a, 8
@@ -84,7 +84,7 @@ define i32 @srli(i32 %a) nounwind {
 
 define i32 @srai(i32 %a) nounwind {
 ; RV32I-LABEL: srai:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a0, a0, 9
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = ashr i32 %a, 9
@@ -95,7 +95,7 @@ define i32 @srai(i32 %a) nounwind {
 
 define i32 @add(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: add:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = add i32 %a, %b
@@ -104,7 +104,7 @@ define i32 @add(i32 %a, i32 %b) nounwind
 
 define i32 @sub(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sub:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = sub i32 %a, %b
@@ -113,7 +113,7 @@ define i32 @sub(i32 %a, i32 %b) nounwind
 
 define i32 @sll(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sll:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sll a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = shl i32 %a, %b
@@ -122,7 +122,7 @@ define i32 @sll(i32 %a, i32 %b) nounwind
 
 define i32 @slt(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: slt:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp slt i32 %a, %b
@@ -132,7 +132,7 @@ define i32 @slt(i32 %a, i32 %b) nounwind
 
 define i32 @sltu(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sltu:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp ult i32 %a, %b
@@ -142,7 +142,7 @@ define i32 @sltu(i32 %a, i32 %b) nounwin
 
 define i32 @xor(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: xor:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = xor i32 %a, %b
@@ -151,7 +151,7 @@ define i32 @xor(i32 %a, i32 %b) nounwind
 
 define i32 @srl(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srl:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = lshr i32 %a, %b
@@ -160,7 +160,7 @@ define i32 @srl(i32 %a, i32 %b) nounwind
 
 define i32 @sra(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sra:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sra a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = ashr i32 %a, %b
@@ -169,7 +169,7 @@ define i32 @sra(i32 %a, i32 %b) nounwind
 
 define i32 @or(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: or:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = or i32 %a, %b
@@ -178,7 +178,7 @@ define i32 @or(i32 %a, i32 %b) nounwind
 
 define i32 @and(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: and:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = and i32 %a, %b

Modified: llvm/trunk/test/CodeGen/RISCV/bare-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/bare-select.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/bare-select.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/bare-select.ll Mon Dec  4 09:18:51 2017
@@ -4,10 +4,10 @@
 
 define i32 @bare_select(i1 %a, i32 %b, i32 %c) {
 ; RV32I-LABEL: bare_select:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    bne a0, zero, .LBB0_2
-; RV32I-NEXT:  # BB#1:
+; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    addi a1, a2, 0
 ; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    addi a0, a1, 0

Modified: llvm/trunk/test/CodeGen/RISCV/blockaddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/blockaddress.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/blockaddress.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/blockaddress.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @test_blockaddress() nounwind {
 ; RV32I-LABEL: test_blockaddress:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 0(s0)
 ; RV32I-NEXT:    lui a0, %hi(addr)
 ; RV32I-NEXT:    addi a0, a0, %lo(addr)

Modified: llvm/trunk/test/CodeGen/RISCV/branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/branch.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/branch.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/branch.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @foo(i32 %a, i32 *%b, i1 %c) {
 ; RV32I-LABEL: foo:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a3, 0(a1)
 ; RV32I-NEXT:    beq a3, a0, .LBB0_12
 ; RV32I-NEXT:    jal zero, .LBB0_1

Modified: llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll Mon Dec  4 09:18:51 2017
@@ -14,7 +14,7 @@ declare i32 @llvm.ctpop.i32(i32)
 
 define i16 @test_bswap_i16(i16 %a) nounwind {
 ; RV32I-LABEL: test_bswap_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 4080
 ; RV32I-NEXT:    addi a1, a1, 0
 ; RV32I-NEXT:    slli a2, a0, 8
@@ -29,7 +29,7 @@ define i16 @test_bswap_i16(i16 %a) nounw
 
 define i32 @test_bswap_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_bswap_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -256
 ; RV32I-NEXT:    srli a2, a0, 8
@@ -50,7 +50,7 @@ define i32 @test_bswap_i32(i32 %a) nounw
 
 define i64 @test_bswap_i64(i64 %a) nounwind {
 ; RV32I-LABEL: test_bswap_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 16
 ; RV32I-NEXT:    addi a3, a2, -256
 ; RV32I-NEXT:    srli a2, a1, 8
@@ -81,7 +81,7 @@ define i64 @test_bswap_i64(i64 %a) nounw
 
 define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i8:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, 0
 ; RV32I-NEXT:    addi a0, zero, 8
@@ -123,7 +123,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind
 
 define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, 0
 ; RV32I-NEXT:    addi a0, zero, 16
@@ -167,7 +167,7 @@ define i16 @test_cttz_i16(i16 %a) nounwi
 
 define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, 0
 ; RV32I-NEXT:    addi a0, zero, 32
@@ -208,7 +208,7 @@ define i32 @test_cttz_i32(i32 %a) nounwi
 
 define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_ctlz_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, 0
 ; RV32I-NEXT:    addi a0, zero, 32
@@ -257,7 +257,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwi
 
 define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 28(s0)
 ; RV32I-NEXT:    sw s1, 24(s0)
 ; RV32I-NEXT:    sw s2, 20(s0)
@@ -311,7 +311,7 @@ define i64 @test_cttz_i64(i64 %a) nounwi
 ; RV32I-NEXT:    addi a1, s3, 0
 ; RV32I-NEXT:    jalr ra, s6, 0
 ; RV32I-NEXT:    bne s2, zero, .LBB7_2
-; RV32I-NEXT:  # BB#1:
+; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    addi s1, a0, 32
 ; RV32I-NEXT:  .LBB7_2:
@@ -332,7 +332,7 @@ define i64 @test_cttz_i64(i64 %a) nounwi
 
 define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i8_zero_undef:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    xori a0, a0, -1
@@ -367,7 +367,7 @@ define i8 @test_cttz_i8_zero_undef(i8 %a
 
 define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i16_zero_undef:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    xori a0, a0, -1
@@ -402,7 +402,7 @@ define i16 @test_cttz_i16_zero_undef(i16
 
 define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i32_zero_undef:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    xori a0, a0, -1
@@ -437,7 +437,7 @@ define i32 @test_cttz_i32_zero_undef(i32
 
 define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i64_zero_undef:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 28(s0)
 ; RV32I-NEXT:    sw s1, 24(s0)
 ; RV32I-NEXT:    sw s2, 20(s0)
@@ -491,7 +491,7 @@ define i64 @test_cttz_i64_zero_undef(i64
 ; RV32I-NEXT:    addi a1, s3, 0
 ; RV32I-NEXT:    jalr ra, s6, 0
 ; RV32I-NEXT:    bne s2, zero, .LBB11_2
-; RV32I-NEXT:  # BB#1:
+; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    addi s1, a0, 32
 ; RV32I-NEXT:  .LBB11_2:
@@ -512,7 +512,7 @@ define i64 @test_cttz_i64_zero_undef(i64
 
 define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_ctpop_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, 349525
 ; RV32I-NEXT:    addi a1, a1, 1365

Modified: llvm/trunk/test/CodeGen/RISCV/calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/calls.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/calls.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/calls.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ declare i32 @external_function(i32)
 
 define i32 @test_call_external(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_external:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, %hi(external_function)
 ; RV32I-NEXT:    addi a1, a1, %lo(external_function)
@@ -19,7 +19,7 @@ define i32 @test_call_external(i32 %a) n
 
 define i32 @defined_function(i32 %a) nounwind {
 ; RV32I-LABEL: defined_function:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = add i32 %a, 1
@@ -28,7 +28,7 @@ define i32 @defined_function(i32 %a) nou
 
 define i32 @test_call_defined(i32 %a) nounwind {
 ; RV32I-LABEL: test_call_defined:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, %hi(defined_function)
 ; RV32I-NEXT:    addi a1, a1, %lo(defined_function)
@@ -41,7 +41,7 @@ define i32 @test_call_defined(i32 %a) no
 
 define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind {
 ; RV32I-LABEL: test_call_indirect:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    addi a2, a0, 0
 ; RV32I-NEXT:    addi a0, a1, 0
@@ -57,7 +57,7 @@ define i32 @test_call_indirect(i32 (i32)
 
 define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: fastcc_function:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = add i32 %a, %b
@@ -66,7 +66,7 @@ define fastcc i32 @fastcc_function(i32 %
 
 define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: test_call_fastcc:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    sw s1, 8(s0)
 ; RV32I-NEXT:    addi s1, a0, 0

Modified: llvm/trunk/test/CodeGen/RISCV/div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/div.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/div.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/div.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @udiv(i32 %a, i32 %b) {
 ; RV32I-LABEL: udiv:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__udivsi3)
 ; RV32I-NEXT:    addi a2, a2, %lo(__udivsi3)
@@ -17,7 +17,7 @@ define i32 @udiv(i32 %a, i32 %b) {
 
 define i32 @udiv_constant(i32 %a) {
 ; RV32I-LABEL: udiv_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, %hi(__udivsi3)
 ; RV32I-NEXT:    addi a2, a1, %lo(__udivsi3)
@@ -31,7 +31,7 @@ define i32 @udiv_constant(i32 %a) {
 
 define i32 @udiv_pow2(i32 %a) {
 ; RV32I-LABEL: udiv_pow2:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 3
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = udiv i32 %a, 8
@@ -40,7 +40,7 @@ define i32 @udiv_pow2(i32 %a) {
 
 define i64 @udiv64(i64 %a, i64 %b) {
 ; RV32I-LABEL: udiv64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a4, %hi(__udivdi3)
 ; RV32I-NEXT:    addi a4, a4, %lo(__udivdi3)
@@ -53,7 +53,7 @@ define i64 @udiv64(i64 %a, i64 %b) {
 
 define i64 @udiv64_constant(i64 %a) {
 ; RV32I-LABEL: udiv64_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__udivdi3)
 ; RV32I-NEXT:    addi a4, a2, %lo(__udivdi3)
@@ -68,7 +68,7 @@ define i64 @udiv64_constant(i64 %a) {
 
 define i32 @sdiv(i32 %a, i32 %b) {
 ; RV32I-LABEL: sdiv:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__divsi3)
 ; RV32I-NEXT:    addi a2, a2, %lo(__divsi3)
@@ -81,7 +81,7 @@ define i32 @sdiv(i32 %a, i32 %b) {
 
 define i32 @sdiv_constant(i32 %a) {
 ; RV32I-LABEL: sdiv_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, %hi(__divsi3)
 ; RV32I-NEXT:    addi a2, a1, %lo(__divsi3)
@@ -95,7 +95,7 @@ define i32 @sdiv_constant(i32 %a) {
 
 define i32 @sdiv_pow2(i32 %a) {
 ; RV32I-LABEL: sdiv_pow2:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srli a1, a1, 29
 ; RV32I-NEXT:    add a0, a0, a1
@@ -107,7 +107,7 @@ define i32 @sdiv_pow2(i32 %a) {
 
 define i64 @sdiv64(i64 %a, i64 %b) {
 ; RV32I-LABEL: sdiv64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a4, %hi(__divdi3)
 ; RV32I-NEXT:    addi a4, a4, %lo(__divdi3)
@@ -120,7 +120,7 @@ define i64 @sdiv64(i64 %a, i64 %b) {
 
 define i64 @sdiv64_constant(i64 %a) {
 ; RV32I-LABEL: sdiv64_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__divdi3)
 ; RV32I-NEXT:    addi a4, a2, %lo(__divdi3)

Modified: llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i32 @icmp_eq(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_eq:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    sltiu a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -18,7 +18,7 @@ define i32 @icmp_eq(i32 %a, i32 %b) noun
 
 define i32 @icmp_ne(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_ne:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    sltu a0, zero, a0
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -29,7 +29,7 @@ define i32 @icmp_ne(i32 %a, i32 %b) noun
 
 define i32 @icmp_ugt(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_ugt:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp ugt i32 %a, %b
@@ -39,7 +39,7 @@ define i32 @icmp_ugt(i32 %a, i32 %b) nou
 
 define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_uge:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -50,7 +50,7 @@ define i32 @icmp_uge(i32 %a, i32 %b) nou
 
 define i32 @icmp_ult(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_ult:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp ult i32 %a, %b
@@ -60,7 +60,7 @@ define i32 @icmp_ult(i32 %a, i32 %b) nou
 
 define i32 @icmp_ule(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_ule:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -71,7 +71,7 @@ define i32 @icmp_ule(i32 %a, i32 %b) nou
 
 define i32 @icmp_sgt(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sgt:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp sgt i32 %a, %b
@@ -81,7 +81,7 @@ define i32 @icmp_sgt(i32 %a, i32 %b) nou
 
 define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sge:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -92,7 +92,7 @@ define i32 @icmp_sge(i32 %a, i32 %b) nou
 
 define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_slt:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = icmp slt i32 %a, %b
@@ -102,7 +102,7 @@ define i32 @icmp_slt(i32 %a, i32 %b) nou
 
 define i32 @icmp_sle(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sle:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0

Modified: llvm/trunk/test/CodeGen/RISCV/imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/imm.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/imm.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/imm.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @zero() nounwind {
 ; RV32I-LABEL: zero:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, zero, 0
 ; RV32I-NEXT:    jalr zero, ra, 0
   ret i32 0
@@ -14,7 +14,7 @@ define i32 @zero() nounwind {
 
 define i32 @pos_small() nounwind {
 ; RV32I-LABEL: pos_small:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, zero, 2047
 ; RV32I-NEXT:    jalr zero, ra, 0
   ret i32 2047
@@ -22,7 +22,7 @@ define i32 @pos_small() nounwind {
 
 define i32 @neg_small() nounwind {
 ; RV32I-LABEL: neg_small:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, zero, -2048
 ; RV32I-NEXT:    jalr zero, ra, 0
   ret i32 -2048
@@ -30,7 +30,7 @@ define i32 @neg_small() nounwind {
 
 define i32 @pos_i32() nounwind {
 ; RV32I-LABEL: pos_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 423811
 ; RV32I-NEXT:    addi a0, a0, -1297
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -39,7 +39,7 @@ define i32 @pos_i32() nounwind {
 
 define i32 @neg_i32() nounwind {
 ; RV32I-LABEL: neg_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 912092
 ; RV32I-NEXT:    addi a0, a0, -273
 ; RV32I-NEXT:    jalr zero, ra, 0

Modified: llvm/trunk/test/CodeGen/RISCV/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/indirectbr.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/indirectbr.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/indirectbr.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @indirectbr(i8* %target) nounwind {
 ; RV32I-LABEL: indirectbr:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 0(s0)
 ; RV32I-NEXT:    jalr zero, a0, 0
 ; RV32I-NEXT:  .LBB0_1: # %ret
@@ -20,7 +20,7 @@ ret:
 
 define i32 @indirectbr_with_offset(i8* %a) nounwind {
 ; RV32I-LABEL: indirectbr_with_offset:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 0(s0)
 ; RV32I-NEXT:    jalr zero, a0, 1380
 ; RV32I-NEXT:  .LBB1_1: # %ret

Modified: llvm/trunk/test/CodeGen/RISCV/jumptable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/jumptable.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/jumptable.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/jumptable.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @jt(i32 %in, i32* %out) {
 ; RV32I-LABEL: jt:
-; RV32I:       # BB#0: # %entry
+; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    addi a2, zero, 2
 ; RV32I-NEXT:    blt a2, a0, .LBB0_3
 ; RV32I-NEXT:    jal zero, .LBB0_1

Modified: llvm/trunk/test/CodeGen/RISCV/mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/mem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/mem.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/mem.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @lb(i8 *%a) nounwind {
 ; RV32I-LABEL: lb:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lb a1, 0(a0)
 ; RV32I-NEXT:    lb a0, 1(a0)
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -20,7 +20,7 @@ define i32 @lb(i8 *%a) nounwind {
 
 define i32 @lh(i16 *%a) nounwind {
 ; RV32I-LABEL: lh:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lh a1, 0(a0)
 ; RV32I-NEXT:    lh a0, 4(a0)
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -34,7 +34,7 @@ define i32 @lh(i16 *%a) nounwind {
 
 define i32 @lw(i32 *%a) nounwind {
 ; RV32I-LABEL: lw:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a1, 0(a0)
 ; RV32I-NEXT:    lw a0, 12(a0)
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -46,7 +46,7 @@ define i32 @lw(i32 *%a) nounwind {
 
 define i32 @lbu(i8 *%a) nounwind {
 ; RV32I-LABEL: lbu:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lbu a1, 0(a0)
 ; RV32I-NEXT:    lbu a0, 4(a0)
 ; RV32I-NEXT:    add a0, a0, a1
@@ -62,7 +62,7 @@ define i32 @lbu(i8 *%a) nounwind {
 
 define i32 @lhu(i16 *%a) nounwind {
 ; RV32I-LABEL: lhu:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lhu a1, 0(a0)
 ; RV32I-NEXT:    lhu a0, 10(a0)
 ; RV32I-NEXT:    add a0, a0, a1
@@ -80,7 +80,7 @@ define i32 @lhu(i16 *%a) nounwind {
 
 define void @sb(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: sb:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sb a1, 6(a0)
 ; RV32I-NEXT:    sb a1, 0(a0)
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -92,7 +92,7 @@ define void @sb(i8 *%a, i8 %b) nounwind
 
 define void @sh(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: sh:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sh a1, 14(a0)
 ; RV32I-NEXT:    sh a1, 0(a0)
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -104,7 +104,7 @@ define void @sh(i16 *%a, i16 %b) nounwin
 
 define void @sw(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: sw:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw a1, 32(a0)
 ; RV32I-NEXT:    sw a1, 0(a0)
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -117,7 +117,7 @@ define void @sw(i32 *%a, i32 %b) nounwin
 ; Check load and store to an i1 location
 define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
 ; RV32I-LABEL: load_sext_zext_anyext_i1:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lb a1, 0(a0)
 ; RV32I-NEXT:    lbu a1, 1(a0)
 ; RV32I-NEXT:    lbu a0, 2(a0)
@@ -139,7 +139,7 @@ define i32 @load_sext_zext_anyext_i1(i1
 
 define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 ; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lb a1, 0(a0)
 ; RV32I-NEXT:    lbu a1, 1(a0)
 ; RV32I-NEXT:    lbu a0, 2(a0)
@@ -165,7 +165,7 @@ define i16 @load_sext_zext_anyext_i1_i16
 define i32 @lw_sw_global(i32 %a) nounwind {
 ; TODO: the addi should be folded into the lw/sw operations
 ; RV32I-LABEL: lw_sw_global:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, %hi(G)
 ; RV32I-NEXT:    addi a2, a1, %lo(G)
 ; RV32I-NEXT:    lw a1, 0(a2)
@@ -188,7 +188,7 @@ define i32 @lw_sw_global(i32 %a) nounwin
 define i32 @lw_sw_constant(i32 %a) nounwind {
 ; TODO: the addi should be folded into the lw/sw
 ; RV32I-LABEL: lw_sw_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 912092
 ; RV32I-NEXT:    addi a2, a1, -273
 ; RV32I-NEXT:    lw a1, 0(a2)

Modified: llvm/trunk/test/CodeGen/RISCV/mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/mul.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/mul.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/mul.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @square(i32 %a) {
 ; RV32I-LABEL: square:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, %hi(__mulsi3)
 ; RV32I-NEXT:    addi a2, a1, %lo(__mulsi3)
@@ -18,7 +18,7 @@ define i32 @square(i32 %a) {
 
 define i32 @mul(i32 %a, i32 %b) {
 ; RV32I-LABEL: mul:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__mulsi3)
 ; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
@@ -31,7 +31,7 @@ define i32 @mul(i32 %a, i32 %b) {
 
 define i32 @mul_constant(i32 %a) {
 ; RV32I-LABEL: mul_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a1, %hi(__mulsi3)
 ; RV32I-NEXT:    addi a2, a1, %lo(__mulsi3)
@@ -45,7 +45,7 @@ define i32 @mul_constant(i32 %a) {
 
 define i32 @mul_pow2(i32 %a) {
 ; RV32I-LABEL: mul_pow2:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 3
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = mul i32 %a, 8
@@ -54,7 +54,7 @@ define i32 @mul_pow2(i32 %a) {
 
 define i64 @mul64(i64 %a, i64 %b) {
 ; RV32I-LABEL: mul64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a4, %hi(__muldi3)
 ; RV32I-NEXT:    addi a4, a4, %lo(__muldi3)
@@ -67,7 +67,7 @@ define i64 @mul64(i64 %a, i64 %b) {
 
 define i64 @mul64_constant(i64 %a) {
 ; RV32I-LABEL: mul64_constant:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__muldi3)
 ; RV32I-NEXT:    addi a4, a2, %lo(__muldi3)

Modified: llvm/trunk/test/CodeGen/RISCV/rem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/rem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/rem.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/rem.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__umodsi3)
 ; RV32I-NEXT:    addi a2, a2, %lo(__umodsi3)
@@ -17,7 +17,7 @@ define i32 @urem(i32 %a, i32 %b) nounwin
 
 define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srem:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a2, %hi(__modsi3)
 ; RV32I-NEXT:    addi a2, a2, %lo(__modsi3)

Modified: llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i32 @rotl(i32 %x, i32 %y) {
 ; RV32I-LABEL: rotl:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a2, zero, 32
 ; RV32I-NEXT:    sub a2, a2, a1
 ; RV32I-NEXT:    sll a1, a0, a1
@@ -23,7 +23,7 @@ define i32 @rotl(i32 %x, i32 %y) {
 
 define i32 @rotr(i32 %x, i32 %y) {
 ; RV32I-LABEL: rotr:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a2, zero, 32
 ; RV32I-NEXT:    sub a2, a2, a1
 ; RV32I-NEXT:    srl a1, a0, a1

Modified: llvm/trunk/test/CodeGen/RISCV/select-cc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/select-cc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/select-cc.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/select-cc.ll Mon Dec  4 09:18:51 2017
@@ -4,55 +4,55 @@
 
 define i32 @foo(i32 %a, i32 *%b) {
 ; RV32I-LABEL: foo:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    beq a0, a2, .LBB0_2
-; RV32I-NEXT:  # BB#1:
+; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    bne a0, a2, .LBB0_4
-; RV32I-NEXT:  # BB#3:
+; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_4:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    bltu a2, a0, .LBB0_6
-; RV32I-NEXT:  # BB#5:
+; RV32I-NEXT:  # %bb.5:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_6:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    bgeu a0, a2, .LBB0_8
-; RV32I-NEXT:  # BB#7:
+; RV32I-NEXT:  # %bb.7:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_8:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    bltu a0, a2, .LBB0_10
-; RV32I-NEXT:  # BB#9:
+; RV32I-NEXT:  # %bb.9:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_10:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    bgeu a2, a0, .LBB0_12
-; RV32I-NEXT:  # BB#11:
+; RV32I-NEXT:  # %bb.11:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_12:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    blt a2, a0, .LBB0_14
-; RV32I-NEXT:  # BB#13:
+; RV32I-NEXT:  # %bb.13:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_14:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    bge a0, a2, .LBB0_16
-; RV32I-NEXT:  # BB#15:
+; RV32I-NEXT:  # %bb.15:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_16:
 ; RV32I-NEXT:    lw a2, 0(a1)
 ; RV32I-NEXT:    blt a0, a2, .LBB0_18
-; RV32I-NEXT:  # BB#17:
+; RV32I-NEXT:  # %bb.17:
 ; RV32I-NEXT:    addi a0, a2, 0
 ; RV32I-NEXT:  .LBB0_18:
 ; RV32I-NEXT:    lw a1, 0(a1)
 ; RV32I-NEXT:    bge a1, a0, .LBB0_20
-; RV32I-NEXT:  # BB#19:
+; RV32I-NEXT:  # %bb.19:
 ; RV32I-NEXT:    addi a0, a1, 0
 ; RV32I-NEXT:  .LBB0_20:
 ; RV32I-NEXT:    jalr zero, ra, 0

Modified: llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i8 @sext_i1_to_i8(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i8:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    sub a0, zero, a0
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -14,7 +14,7 @@ define i8 @sext_i1_to_i8(i1 %a) {
 
 define i16 @sext_i1_to_i16(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    sub a0, zero, a0
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -24,7 +24,7 @@ define i16 @sext_i1_to_i16(i1 %a) {
 
 define i32 @sext_i1_to_i32(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    sub a0, zero, a0
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -34,7 +34,7 @@ define i32 @sext_i1_to_i32(i1 %a) {
 
 define i64 @sext_i1_to_i64(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    sub a0, zero, a0
 ; RV32I-NEXT:    addi a1, a0, 0
@@ -45,7 +45,7 @@ define i64 @sext_i1_to_i64(i1 %a) {
 
 define i16 @sext_i8_to_i16(i8 %a) {
 ; RV32I-LABEL: sext_i8_to_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -55,7 +55,7 @@ define i16 @sext_i8_to_i16(i8 %a) {
 
 define i32 @sext_i8_to_i32(i8 %a) {
 ; RV32I-LABEL: sext_i8_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -65,7 +65,7 @@ define i32 @sext_i8_to_i32(i8 %a) {
 
 define i64 @sext_i8_to_i64(i8 %a) {
 ; RV32I-LABEL: sext_i8_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a0, 24
 ; RV32I-NEXT:    srai a0, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 31
@@ -76,7 +76,7 @@ define i64 @sext_i8_to_i64(i8 %a) {
 
 define i32 @sext_i16_to_i32(i16 %a) {
 ; RV32I-LABEL: sext_i16_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -86,7 +86,7 @@ define i32 @sext_i16_to_i32(i16 %a) {
 
 define i64 @sext_i16_to_i64(i16 %a) {
 ; RV32I-LABEL: sext_i16_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a0, 16
 ; RV32I-NEXT:    srai a0, a1, 16
 ; RV32I-NEXT:    srai a1, a1, 31
@@ -97,7 +97,7 @@ define i64 @sext_i16_to_i64(i16 %a) {
 
 define i64 @sext_i32_to_i64(i32 %a) {
 ; RV32I-LABEL: sext_i32_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = sext i32 %a to i64
@@ -106,7 +106,7 @@ define i64 @sext_i32_to_i64(i32 %a) {
 
 define i8 @zext_i1_to_i8(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i8:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = zext i1 %a to i8
@@ -115,7 +115,7 @@ define i8 @zext_i1_to_i8(i1 %a) {
 
 define i16 @zext_i1_to_i16(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = zext i1 %a to i16
@@ -124,7 +124,7 @@ define i16 @zext_i1_to_i16(i1 %a) {
 
 define i32 @zext_i1_to_i32(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = zext i1 %a to i32
@@ -133,7 +133,7 @@ define i32 @zext_i1_to_i32(i1 %a) {
 
 define i64 @zext_i1_to_i64(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    addi a1, zero, 0
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -143,7 +143,7 @@ define i64 @zext_i1_to_i64(i1 %a) {
 
 define i16 @zext_i8_to_i16(i8 %a) {
 ; RV32I-LABEL: zext_i8_to_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = zext i8 %a to i16
@@ -152,7 +152,7 @@ define i16 @zext_i8_to_i16(i8 %a) {
 
 define i32 @zext_i8_to_i32(i8 %a) {
 ; RV32I-LABEL: zext_i8_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = zext i8 %a to i32
@@ -161,7 +161,7 @@ define i32 @zext_i8_to_i32(i8 %a) {
 
 define i64 @zext_i8_to_i64(i8 %a) {
 ; RV32I-LABEL: zext_i8_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    addi a1, zero, 0
 ; RV32I-NEXT:    jalr zero, ra, 0
@@ -171,7 +171,7 @@ define i64 @zext_i8_to_i64(i8 %a) {
 
 define i32 @zext_i16_to_i32(i16 %a) {
 ; RV32I-LABEL: zext_i16_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
@@ -182,7 +182,7 @@ define i32 @zext_i16_to_i32(i16 %a) {
 
 define i64 @zext_i16_to_i64(i16 %a) {
 ; RV32I-LABEL: zext_i16_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
@@ -194,7 +194,7 @@ define i64 @zext_i16_to_i64(i16 %a) {
 
 define i64 @zext_i32_to_i64(i32 %a) {
 ; RV32I-LABEL: zext_i32_to_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a1, zero, 0
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = zext i32 %a to i64
@@ -206,7 +206,7 @@ define i64 @zext_i32_to_i64(i32 %a) {
 
 define i1 @trunc_i8_to_i1(i8 %a) {
 ; RV32I-LABEL: trunc_i8_to_i1:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i8 %a to i1
   ret i1 %1
@@ -214,7 +214,7 @@ define i1 @trunc_i8_to_i1(i8 %a) {
 
 define i1 @trunc_i16_to_i1(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i1:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i16 %a to i1
   ret i1 %1
@@ -222,7 +222,7 @@ define i1 @trunc_i16_to_i1(i16 %a) {
 
 define i1 @trunc_i32_to_i1(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i1:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i32 %a to i1
   ret i1 %1
@@ -230,7 +230,7 @@ define i1 @trunc_i32_to_i1(i32 %a) {
 
 define i1 @trunc_i64_to_i1(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i1:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i64 %a to i1
   ret i1 %1
@@ -238,7 +238,7 @@ define i1 @trunc_i64_to_i1(i64 %a) {
 
 define i8 @trunc_i16_to_i8(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i8:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i16 %a to i8
   ret i8 %1
@@ -246,7 +246,7 @@ define i8 @trunc_i16_to_i8(i16 %a) {
 
 define i8 @trunc_i32_to_i8(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i8:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i32 %a to i8
   ret i8 %1
@@ -254,7 +254,7 @@ define i8 @trunc_i32_to_i8(i32 %a) {
 
 define i8 @trunc_i64_to_i8(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i8:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i64 %a to i8
   ret i8 %1
@@ -262,7 +262,7 @@ define i8 @trunc_i64_to_i8(i64 %a) {
 
 define i16 @trunc_i32_to_i16(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i32 %a to i16
   ret i16 %1
@@ -270,7 +270,7 @@ define i16 @trunc_i32_to_i16(i32 %a) {
 
 define i16 @trunc_i64_to_i16(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i16:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i64 %a to i16
   ret i16 %1
@@ -278,7 +278,7 @@ define i16 @trunc_i64_to_i16(i64 %a) {
 
 define i32 @trunc_i64_to_i32(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i32:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jalr zero, ra, 0
   %1 = trunc i64 %a to i32
   ret i32 %1

Modified: llvm/trunk/test/CodeGen/RISCV/shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/shifts.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/shifts.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/shifts.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i64 @lshr64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: lshr64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a3, %hi(__lshrdi3)
 ; RV32I-NEXT:    addi a3, a3, %lo(__lshrdi3)
@@ -20,7 +20,7 @@ define i64 @lshr64(i64 %a, i64 %b) nounw
 
 define i64 @ashr64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: ashr64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a3, %hi(__ashrdi3)
 ; RV32I-NEXT:    addi a3, a3, %lo(__ashrdi3)
@@ -33,7 +33,7 @@ define i64 @ashr64(i64 %a, i64 %b) nounw
 
 define i64 @shl64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: shl64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sw ra, 12(s0)
 ; RV32I-NEXT:    lui a3, %hi(__ashldi3)
 ; RV32I-NEXT:    addi a3, a3, %lo(__ashldi3)

Modified: llvm/trunk/test/CodeGen/RISCV/wide-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/wide-mem.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/wide-mem.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/wide-mem.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @load_i64(i64 *%a) nounwind {
 ; RV32I-LABEL: load_i64:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a2, 0(a0)
 ; RV32I-NEXT:    lw a1, 4(a0)
 ; RV32I-NEXT:    addi a0, a2, 0
@@ -21,7 +21,7 @@ define i64 @load_i64(i64 *%a) nounwind {
 ; generate two addi
 define i64 @load_i64_global() nounwind {
 ; RV32I-LABEL: load_i64_global:
-; RV32I:       # BB#0:
+; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, %hi(val64)
 ; RV32I-NEXT:    addi a0, a0, %lo(val64)
 ; RV32I-NEXT:    lw a0, 0(a0)

Modified: llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll (original)
+++ llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll Mon Dec  4 09:18:51 2017
@@ -18,7 +18,7 @@ define void @test_Bcc_fallthrough_taken(
 ; CHECK: cmp {{%[goli][0-9]+}}, 42
 ; CHECK: bne [[FALSE:.LBB[0-9]+_[0-9]+]]
 ; CHECK-NEXT: nop
-; CHECK-NEXT: ! BB#
+; CHECK-NEXT: ! %bb.
 ; CHECK-NEXT: call test_true
 
 ; CHECK: [[FALSE]]:
@@ -42,7 +42,7 @@ define void @test_Bcc_fallthrough_nottak
 
 ; CHECK: be [[TRUE:.LBB[0-9]+_[0-9]+]]
 ; CHECK-NEXT: nop
-; CHECK-NEXT: ! BB#
+; CHECK-NEXT: ! %bb.
 ; CHECK-NEXT: call test_false
 
 ; CHECK: [[TRUE]]:

Modified: llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll (original)
+++ llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; look-thru for extractelement then we know that the add will yield a
 ; non-negative result.
 define i1 @test1(<4 x i16>* %in) {
-; CHECK-LABEL: ! BB#0:
+; CHECK-LABEL: ! %bb.0:
 ; CHECK-NEXT:        retl
 ; CHECK-NEXT:        sethi 0, %o0
   %vec2 = load <4 x i16>, <4 x i16>* %in, align 1

Modified: llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 ; store i1 true, i1* %g_717.sink.i, align 4
 ; %.b = load i1, i1* @g_2, align 4
 
-; CHECK: # BB#6: # %crc32_gentab.exit
+; CHECK: # %bb.6: # %crc32_gentab.exit
 ; CHECK:        larl    %r2, g_2
 ; CHECK-NEXT:   llc     %r3, 0(%r2)
 ; CHECK-NOT:    %r2

Modified: llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll Mon Dec  4 09:18:51 2017
@@ -93,7 +93,7 @@ define signext i32 @main(i32 signext, i8
   br i1 %60, label %61, label %13
 
 ; <label>:61:                                     ; preds = %13
-; CHECK-LABEL: BB#6:
+; CHECK-LABEL: %bb.6:
 ; CHECK: stgrl   %r1, g_56
 ; CHECK: llhrl   %r1, g_56+6
 ; CHECK: stgrl   %r2, g_56

Modified: llvm/trunk/test/CodeGen/SystemZ/int-cmp-51.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-cmp-51.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-cmp-51.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/int-cmp-51.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@ declare void @bar(i8)
 
 ; Check the low end of the CH range.
 define void @f1(i32 %lhs) {
-; CHECK-LABEL: BB#1:
+; CHECK-LABEL: %bb.1:
 ; CHECK-NOT: cijlh %r0, 1, .LBB0_3
 
 entry:

Modified: llvm/trunk/test/CodeGen/SystemZ/pr32372.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/pr32372.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/pr32372.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/pr32372.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @pr32372(i8*) {
 ; CHECK-LABEL: pr32372:
-; CHECK:       # BB#0: # %BB
+; CHECK:       # %bb.0: # %BB
 ; CHECK-NEXT:    llc %r1, 0(%r2)
 ; CHECK-NEXT:    mvhhi 0(%r1), -3825
 ; CHECK-NEXT:    llill %r0, 0

Modified: llvm/trunk/test/CodeGen/SystemZ/pr32505.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/pr32505.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/pr32505.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/pr32505.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@ target triple = "s390x-ibm-linux"
 
 define <2 x float> @pr32505(<2 x i8> * %a) {
 ; CHECK-LABEL: pr32505:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lbh %r0, 1(%r2)
 ; CHECK-NEXT:    lbh %r1, 0(%r2)
 ; CHECK-NEXT:    ldgr %f0, %r1

Modified: llvm/trunk/test/CodeGen/SystemZ/strcmp-01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/strcmp-01.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/strcmp-01.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/strcmp-01.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@ define i32 @f1(i8 *%src1, i8 *%src2) {
 ; CHECK: [[LABEL:\.[^:]*]]:
 ; CHECK: clst %r2, %r3
 ; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
 ; CHECK-NEXT: ipm [[REG:%r[0-5]]]
 ; CHECK: srl [[REG]], 28
 ; CHECK: rll %r2, [[REG]], 31
@@ -27,7 +27,7 @@ define void @f2(i8 *%src1, i8 *%src2, i3
 ; CHECK: [[LABEL:\.[^:]*]]:
 ; CHECK: clst %r2, %r3
 ; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
 ; CHECK-NEXT: ber %r14
 ; CHECK: br %r14
   %res = call i32 @strcmp(i8 *%src1, i8 *%src2)
@@ -50,7 +50,7 @@ define i32 @f3(i8 *%src1, i8 *%src2, i32
 ; CHECK: [[LABEL:\.[^:]*]]:
 ; CHECK: clst %r2, %r3
 ; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
 ; CHECK-NEXT: ipm [[REG:%r[0-5]]]
 ; CHECK: srl [[REG]], 28
 ; CHECK: rll %r2, [[REG]], 31

Modified: llvm/trunk/test/CodeGen/SystemZ/strlen-01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/strlen-01.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/strlen-01.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/strlen-01.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@ define i64 @f1(i32 %dummy, i8 *%src) {
 ; CHECK: [[LABEL:\.[^:]*]]:
 ; CHECK-NEXT: srst %r2, [[REG]]
 ; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
 ; CHECK-NEXT: sgr %r2, %r3
 ; CHECK: br %r14
   %res = call i64 @strlen(i8 *%src)
@@ -31,7 +31,7 @@ define i64 @f2(i64 %len, i8 *%src) {
 ; CHECK: [[LABEL:\.[^:]*]]:
 ; CHECK-NEXT: srst %r2, [[REG]]
 ; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
+; CHECK-NEXT: %bb.{{[0-9]+}}
 ; CHECK-NEXT: sgr %r2, %r3
 ; CHECK: br %r14
   %res = call i64 @strnlen(i8 *%src, i64 %len)

Modified: llvm/trunk/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
 ; CHECK-LABEL: fun0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqb [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqb [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vn %v0, [[REG0]], [[REG1]]
@@ -22,7 +22,7 @@ define <2 x i8> @fun0(<2 x i8> %val1, <2
 
 define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
 ; CHECK-LABEL: fun1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqb [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqb [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vn %v0, [[REG0]], [[REG1]]
@@ -38,7 +38,7 @@ define <2 x i16> @fun1(<2 x i8> %val1, <
 
 define <16 x i8> @fun2(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i8> %val5, <16 x i8> %val6) {
 ; CHECK-LABEL: fun2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqh [[REG0:%v[0-9]+]], %v30, %v27
 ; CHECK-DAG:     vceqh [[REG1:%v[0-9]+]], %v28, %v25
 ; CHECK-DAG:     vceqb [[REG2:%v[0-9]+]], %v24, %v26
@@ -55,7 +55,7 @@ define <16 x i8> @fun2(<16 x i8> %val1,
 
 define <16 x i16> @fun3(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i16> %val5, <16 x i16> %val6) {
 ; CHECK-LABEL: fun3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqb [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vuphb [[REG2:%v[0-9]+]], [[REG0]]
 ; CHECK-DAG:     vmrlg [[REG1:%v[0-9]+]], [[REG0]], [[REG0]]
@@ -78,7 +78,7 @@ define <16 x i16> @fun3(<16 x i8> %val1,
 
 define <32 x i8> @fun4(<32 x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x i8> %val4, <32 x i8> %val5, <32 x i8> %val6) {
 ; CHECK-LABEL: fun4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqb [[REG0:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vceqb [[REG1:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqb [[REG2:%v[0-9]+]], %v25, %v29
@@ -101,7 +101,7 @@ define <32 x i8> @fun4(<32 x i8> %val1,
 
 define <2 x i8> @fun5(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
 ; CHECK-LABEL: fun5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqh [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vpkh [[REG1:%v[0-9]+]], [[REG0]], [[REG0]]
 ; CHECK-DAG:     vceqb [[REG2:%v[0-9]+]], %v28, %v30
@@ -117,7 +117,7 @@ define <2 x i8> @fun5(<2 x i16> %val1, <
 
 define <2 x i16> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
 ; CHECK-LABEL: fun6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v1, %v28, %v30
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vuphb %v1, %v1
@@ -133,7 +133,7 @@ define <2 x i16> @fun6(<2 x i16> %val1,
 
 define <2 x i32> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i32> %val5, <2 x i32> %val6) {
 ; CHECK-LABEL: fun7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v1, %v28, %v30
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vuphb %v1, %v1
@@ -150,7 +150,7 @@ define <2 x i32> @fun7(<2 x i16> %val1,
 
 define <8 x i8> @fun8(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i8> %val5, <8 x i8> %val6) {
 ; CHECK-LABEL: fun8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqh [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqh [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vx %v0, [[REG0]], [[REG1]]
@@ -166,7 +166,7 @@ define <8 x i8> @fun8(<8 x i16> %val1, <
 
 define <8 x i16> @fun9(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i16> %val5, <8 x i16> %val6) {
 ; CHECK-LABEL: fun9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqh [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqh [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vx %v0, [[REG0]], [[REG1]]
@@ -181,7 +181,7 @@ define <8 x i16> @fun9(<8 x i16> %val1,
 
 define <8 x i32> @fun10(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i32> %val5, <8 x i32> %val6) {
 ; CHECK-LABEL: fun10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqh [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqh [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vx [[REG2:%v[0-9]+]], [[REG0]], [[REG1]]
@@ -200,7 +200,7 @@ define <8 x i32> @fun10(<8 x i16> %val1,
 
 define <16 x i8> @fun11(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i8> %val5, <16 x i8> %val6) {
 ; CHECK-LABEL: fun11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vl [[REG0:%v[0-9]+]], 192(%r15)
 ; CHECK-DAG:     vl [[REG1:%v[0-9]+]], 208(%r15)
 ; CHECK-DAG:     vl [[REG2:%v[0-9]+]], 160(%r15)
@@ -229,7 +229,7 @@ define <16 x i8> @fun11(<16 x i16> %val1
 
 define <16 x i16> @fun12(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i16> %val5, <16 x i16> %val6) {
 ; CHECK-LABEL: fun12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vl [[REG0:%v[0-9]+]], 192(%r15)
 ; CHECK-DAG:     vl [[REG1:%v[0-9]+]], 208(%r15)
 ; CHECK-DAG:     vl [[REG2:%v[0-9]+]], 160(%r15)
@@ -260,7 +260,7 @@ define <16 x i16> @fun12(<16 x i16> %val
 
 define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i16> %val5, <2 x i16> %val6) {
 ; CHECK-LABEL: fun13:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqg %v1, %v28, %v30
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vpkg %v1, %v1, %v1
@@ -277,7 +277,7 @@ define <2 x i16> @fun13(<2 x i32> %val1,
 
 define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
 ; CHECK-LABEL: fun14:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqg %v1, %v28, %v30
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vpkg %v1, %v1, %v1
@@ -293,7 +293,7 @@ define <2 x i32> @fun14(<2 x i32> %val1,
 
 define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
 ; CHECK-LABEL: fun15:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqf [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vuphf [[REG1:%v[0-9]+]], [[REG0]]
 ; CHECK-DAG:     vceqg [[REG2:%v[0-9]+]], %v28, %v30
@@ -309,7 +309,7 @@ define <2 x i64> @fun15(<2 x i32> %val1,
 
 define <4 x i16> @fun16(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4, <4 x i16> %val5, <4 x i16> %val6) {
 ; CHECK-LABEL: fun16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqf [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vpkf [[REG1:%v[0-9]+]], [[REG0]], [[REG0]]
 ; CHECK-DAG:     vceqh [[REG2:%v[0-9]+]], %v28, %v30
@@ -325,7 +325,7 @@ define <4 x i16> @fun16(<4 x i32> %val1,
 
 define <4 x i32> @fun17(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4, <4 x i32> %val5, <4 x i32> %val6) {
 ; CHECK-LABEL: fun17:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v1, %v28, %v30
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vuphh %v1, %v1
@@ -341,7 +341,7 @@ define <4 x i32> @fun17(<4 x i32> %val1,
 
 define <4 x i64> @fun18(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4, <4 x i64> %val5, <4 x i64> %val6) {
 ; CHECK-LABEL: fun18:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v1, %v28, %v30
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vuphh %v1, %v1
@@ -361,7 +361,7 @@ define <4 x i64> @fun18(<4 x i32> %val1,
 
 define <8 x i16> @fun19(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i16> %val5, <8 x i16> %val6) {
 ; CHECK-LABEL: fun19:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqf [[REG0:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vceqf [[REG1:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqf [[REG2:%v[0-9]+]], %v25, %v29
@@ -382,7 +382,7 @@ define <8 x i16> @fun19(<8 x i32> %val1,
 
 define <8 x i32> @fun20(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i32> %val5, <8 x i32> %val6) {
 ; CHECK-LABEL: fun20:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqf [[REG0:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vceqf [[REG1:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqf [[REG2:%v[0-9]+]], %v25, %v29
@@ -405,7 +405,7 @@ define <8 x i32> @fun20(<8 x i32> %val1,
 
 define <2 x i32> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
 ; CHECK-LABEL: fun21:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqg [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqg [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vn %v0, [[REG0]], [[REG1]]
@@ -421,7 +421,7 @@ define <2 x i32> @fun21(<2 x i64> %val1,
 
 define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
 ; CHECK-LABEL: fun22:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqg [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqg [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vn %v0, [[REG0]], [[REG1]]
@@ -436,7 +436,7 @@ define <2 x i64> @fun22(<2 x i64> %val1,
 
 define <4 x i32> @fun23(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i32> %val5, <4 x i32> %val6) {
 ; CHECK-LABEL: fun23:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqg %v0, %v26, %v30
 ; CHECK-NEXT:    vceqg %v1, %v24, %v28
 ; CHECK-NEXT:    vpkg %v0, %v1, %v0
@@ -453,7 +453,7 @@ define <4 x i32> @fun23(<4 x i64> %val1,
 
 define <4 x i64> @fun24(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i64> %val5, <4 x i64> %val6) {
 ; CHECK-LABEL: fun24:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf [[REG0:%v[0-9]+]], %v25, %v27
 ; CHECK-NEXT:    vuphf [[REG1:%v[0-9]+]], [[REG0]]
 ; CHECK-NEXT:    vmrlg [[REG2:%v[0-9]+]], [[REG0]], [[REG0]]
@@ -476,7 +476,7 @@ define <4 x i64> @fun24(<4 x i64> %val1,
 
 define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
 ; CHECK-LABEL: fun25:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -495,7 +495,7 @@ define <2 x float> @fun25(<2 x float> %v
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun25:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchdb %v1, %v28, %v30
 ; CHECK-Z14-NEXT:    vfchsb %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vpkg %v1, %v1, %v1
@@ -511,7 +511,7 @@ define <2 x float> @fun25(<2 x float> %v
 
 define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x double> %val5, <2 x double> %val6) {
 ; CHECK-LABEL: fun26:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -530,7 +530,7 @@ define <2 x double> @fun26(<2 x float> %
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun26:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vuphf %v0, %v0
 ; CHECK-Z14-NEXT:    vfchdb %v1, %v28, %v30
@@ -547,7 +547,7 @@ define <2 x double> @fun26(<2 x float> %
 ; Also check a widening select of a vector of floats
 define <2 x float> @fun27(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x float> %val5, <2 x float> %val6) {
 ; CHECK-LABEL: fun27:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqb [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vceqb [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vo %v0, [[REG0]], [[REG1]]
@@ -564,7 +564,7 @@ define <2 x float> @fun27(<2 x i8> %val1
 
 define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
 ; CHECK-LABEL: fun28:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vmrlf [[REG0:%v[0-9]+]], %v26, %v26
 ; CHECK-DAG:     vmrlf [[REG1:%v[0-9]+]], %v24, %v24
 ; CHECK-DAG:     vldeb [[REG2:%v[0-9]+]], [[REG0]]
@@ -592,7 +592,7 @@ define <4 x float> @fun28(<4 x float> %v
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun28:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vfchsb %v1, %v28, %v30
 ; CHECK-Z14-NEXT:    vx %v0, %v0, %v1
@@ -607,7 +607,7 @@ define <4 x float> @fun28(<4 x float> %v
 
 define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
 ; CHECK-LABEL: fun29:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -639,7 +639,7 @@ define <4 x double> @fun29(<4 x float> %
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun29:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vfchsb %v1, %v28, %v30
 ; CHECK-Z14-NEXT:    vx %v0, %v0, %v1
@@ -658,7 +658,7 @@ define <4 x double> @fun29(<4 x float> %
 
 define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x double> %val3, <8 x double> %val4, <8 x float> %val5, <8 x float> %val6) {
 ; CHECK-LABEL: fun30:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v16, %v28, %v28
 ; CHECK-NEXT:    vmrlf %v17, %v24, %v24
 ; CHECK-NEXT:    vldeb %v16, %v16
@@ -702,7 +702,7 @@ define <8 x float> @fun30(<8 x float> %v
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun30:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vl %v4, 192(%r15)
 ; CHECK-Z14-NEXT:    vl %v5, 208(%r15)
 ; CHECK-Z14-NEXT:    vl %v6, 160(%r15)
@@ -733,7 +733,7 @@ define <8 x float> @fun30(<8 x float> %v
 
 define <2 x float> @fun31(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
 ; CHECK-LABEL: fun31:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vfchdb [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vfchdb [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vx %v0, [[REG0]], [[REG1]]
@@ -749,7 +749,7 @@ define <2 x float> @fun31(<2 x double> %
 
 define <2 x double> @fun32(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4, <2 x double> %val5, <2 x double> %val6) {
 ; CHECK-LABEL: fun32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vfchdb [[REG0:%v[0-9]+]], %v24, %v26
 ; CHECK-DAG:     vfchdb [[REG1:%v[0-9]+]], %v28, %v30
 ; CHECK-NEXT:    vx %v0, [[REG0]], [[REG1]]
@@ -764,7 +764,7 @@ define <2 x double> @fun32(<2 x double>
 
 define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
 ; CHECK-LABEL: fun33:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfchdb %v0, %v26, %v30
 ; CHECK-NEXT:    vfchdb %v1, %v24, %v28
 ; CHECK-NEXT:    vpkg %v0, %v1, %v0
@@ -784,7 +784,7 @@ define <4 x float> @fun33(<4 x double> %
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun33:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchdb %v0, %v26, %v30
 ; CHECK-Z14-NEXT:    vfchdb %v1, %v24, %v28
 ; CHECK-Z14-NEXT:    vpkg %v0, %v1, %v0
@@ -801,7 +801,7 @@ define <4 x float> @fun33(<4 x double> %
 
 define <4 x double> @fun34(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
 ; CHECK-LABEL: fun34:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf [[REG0:%v[0-9]+]], %v27, %v27
 ; CHECK-NEXT:    vmrlf [[REG1:%v[0-9]+]], %v25, %v25
 ; CHECK-NEXT:    vldeb [[REG2:%v[0-9]+]], [[REG0]]
@@ -827,7 +827,7 @@ define <4 x double> @fun34(<4 x double>
 ; CHECK-NEXT:    br %r14
 ;
 ; CHECK-Z14-LABEL: fun34:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb %v4, %v25, %v27
 ; CHECK-Z14-NEXT:    vuphf %v5, %v4
 ; CHECK-Z14-NEXT:    vmrlg %v4, %v4, %v4

Modified: llvm/trunk/test/CodeGen/SystemZ/vec-cmpsel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/vec-cmpsel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/vec-cmpsel.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/vec-cmpsel.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4) {
 ; CHECK-LABEL: fun0:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -17,7 +17,7 @@ define <2 x i8> @fun0(<2 x i8> %val1, <2
 
 define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16> %val4) {
 ; CHECK-LABEL: fun1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v0, %v24, %v26
 ; CHECK-NEXT:    vuphb %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -29,7 +29,7 @@ define <2 x i16> @fun1(<2 x i8> %val1, <
 
 define <16 x i8> @fun2(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4) {
 ; CHECK-LABEL: fun2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -40,7 +40,7 @@ define <16 x i8> @fun2(<16 x i8> %val1,
 
 define <16 x i16> @fun3(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4) {
 ; CHECK-LABEL: fun3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v0, %v24, %v26
 ; CHECK-DAG:     vuphb [[REG0:%v[0-9]+]], %v0
 ; CHECK-DAG:     vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -55,7 +55,7 @@ define <16 x i16> @fun3(<16 x i8> %val1,
 
 define <32 x i8> @fun4(<32 x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x i8> %val4) {
 ; CHECK-LABEL: fun4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqb [[REG0:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqb [[REG1:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vsel %v24, %v25, %v29, [[REG1]]
@@ -68,7 +68,7 @@ define <32 x i8> @fun4(<32 x i8> %val1,
 
 define <2 x i8> @fun5(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4) {
 ; CHECK-LABEL: fun5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vpkh %v0, %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -80,7 +80,7 @@ define <2 x i8> @fun5(<2 x i16> %val1, <
 
 define <2 x i16> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4) {
 ; CHECK-LABEL: fun6:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -91,7 +91,7 @@ define <2 x i16> @fun6(<2 x i16> %val1,
 
 define <2 x i32> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i32> %val4) {
 ; CHECK-LABEL: fun7:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vuphh %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -103,7 +103,7 @@ define <2 x i32> @fun7(<2 x i16> %val1,
 
 define <8 x i8> @fun8(<8 x i16> %val1, <8 x i16> %val2, <8 x i8> %val3, <8 x i8> %val4) {
 ; CHECK-LABEL: fun8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vpkh %v0, %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -115,7 +115,7 @@ define <8 x i8> @fun8(<8 x i16> %val1, <
 
 define <8 x i16> @fun9(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4) {
 ; CHECK-LABEL: fun9:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -126,7 +126,7 @@ define <8 x i16> @fun9(<8 x i16> %val1,
 
 define <8 x i32> @fun10(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x i32> %val4) {
 ; CHECK-LABEL: fun10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v24, %v26
 ; CHECK-DAG:     vuphh [[REG0:%v[0-9]+]], %v0
 ; CHECK-DAG:     vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -141,7 +141,7 @@ define <8 x i32> @fun10(<8 x i16> %val1,
 
 define <16 x i8> @fun11(<16 x i16> %val1, <16 x i16> %val2, <16 x i8> %val3, <16 x i8> %val4) {
 ; CHECK-LABEL: fun11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqh %v0, %v26, %v30
 ; CHECK-NEXT:    vceqh %v1, %v24, %v28
 ; CHECK-NEXT:    vpkh %v0, %v1, %v0
@@ -154,7 +154,7 @@ define <16 x i8> @fun11(<16 x i16> %val1
 
 define <16 x i16> @fun12(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4) {
 ; CHECK-LABEL: fun12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqh [[REG0:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqh [[REG1:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vsel %v24, %v25, %v29, [[REG1]]
@@ -167,7 +167,7 @@ define <16 x i16> @fun12(<16 x i16> %val
 
 define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i16> %val3, <2 x i16> %val4) {
 ; CHECK-LABEL: fun13:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vpkf %v0, %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -179,7 +179,7 @@ define <2 x i16> @fun13(<2 x i32> %val1,
 
 define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4) {
 ; CHECK-LABEL: fun14:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -190,7 +190,7 @@ define <2 x i32> @fun14(<2 x i32> %val1,
 
 define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4) {
 ; CHECK-LABEL: fun15:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vuphf %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -202,7 +202,7 @@ define <2 x i64> @fun15(<2 x i32> %val1,
 
 define <4 x i16> @fun16(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4) {
 ; CHECK-LABEL: fun16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vpkf %v0, %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -214,7 +214,7 @@ define <4 x i16> @fun16(<4 x i32> %val1,
 
 define <4 x i32> @fun17(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4) {
 ; CHECK-LABEL: fun17:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -225,7 +225,7 @@ define <4 x i32> @fun17(<4 x i32> %val1,
 
 define <4 x i64> @fun18(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x i64> %val4) {
 ; CHECK-LABEL: fun18:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v24, %v26
 ; CHECK-DAG:     vuphf [[REG0:%v[0-9]+]], %v0
 ; CHECK-DAG:     vmrlg [[REG1:%v[0-9]+]], %v0, %v0
@@ -240,7 +240,7 @@ define <4 x i64> @fun18(<4 x i32> %val1,
 
 define <8 x i16> @fun19(<8 x i32> %val1, <8 x i32> %val2, <8 x i16> %val3, <8 x i16> %val4) {
 ; CHECK-LABEL: fun19:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqf %v0, %v26, %v30
 ; CHECK-NEXT:    vceqf %v1, %v24, %v28
 ; CHECK-NEXT:    vpkf %v0, %v1, %v0
@@ -253,7 +253,7 @@ define <8 x i16> @fun19(<8 x i32> %val1,
 
 define <8 x i32> @fun20(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4) {
 ; CHECK-LABEL: fun20:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqf [[REG0:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqf [[REG1:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vsel %v24, %v25, %v29, [[REG1]]
@@ -266,7 +266,7 @@ define <8 x i32> @fun20(<8 x i32> %val1,
 
 define <2 x i32> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i32> %val3, <2 x i32> %val4) {
 ; CHECK-LABEL: fun21:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqg %v0, %v24, %v26
 ; CHECK-NEXT:    vpkg %v0, %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -278,7 +278,7 @@ define <2 x i32> @fun21(<2 x i64> %val1,
 
 define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4) {
 ; CHECK-LABEL: fun22:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqg %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -289,7 +289,7 @@ define <2 x i64> @fun22(<2 x i64> %val1,
 
 define <4 x i32> @fun23(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4) {
 ; CHECK-LABEL: fun23:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqg %v0, %v26, %v30
 ; CHECK-NEXT:    vceqg %v1, %v24, %v28
 ; CHECK-NEXT:    vpkg %v0, %v1, %v0
@@ -302,7 +302,7 @@ define <4 x i32> @fun23(<4 x i64> %val1,
 
 define <4 x i64> @fun24(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4) {
 ; CHECK-LABEL: fun24:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vceqg [[REG0:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vceqg [[REG1:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vsel %v24, %v25, %v29, [[REG1]]
@@ -315,7 +315,7 @@ define <4 x i64> @fun24(<4 x i64> %val1,
 
 define <2 x float> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x float> %val3, <2 x float> %val4) {
 ; CHECK-LABEL: fun25:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -331,7 +331,7 @@ define <2 x float> @fun25(<2 x float> %v
 ; CHECK-NEXT:    br %r14
 
 ; CHECK-Z14-LABEL: fun25:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb  %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vsel    %v24, %v28, %v30, %v0
 ; CHECK-Z14-NEXT:    br %r14
@@ -343,7 +343,7 @@ define <2 x float> @fun25(<2 x float> %v
 
 define <2 x double> @fun26(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4) {
 ; CHECK-LABEL: fun26:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -360,7 +360,7 @@ define <2 x double> @fun26(<2 x float> %
 ; CHECK-NEXT:    br %r14
 
 ; CHECK-Z14-LABEL: fun26:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb  %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vuphf   %v0, %v0
 ; CHECK-Z14-NEXT:    vsel    %v24, %v28, %v30, %v0
@@ -374,7 +374,7 @@ define <2 x double> @fun26(<2 x float> %
 ; Test a widening select of floats.
 define <2 x float> @fun27(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2 x float> %val4) {
 ; CHECK-LABEL: fun27:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vceqb %v0, %v24, %v26
 ; CHECK-NEXT:    vuphb %v0, %v0
 ; CHECK-NEXT:    vuphh %v0, %v0
@@ -388,7 +388,7 @@ define <2 x float> @fun27(<2 x i8> %val1
 
 define <4 x float> @fun28(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4) {
 ; CHECK-LABEL: fun28:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -404,7 +404,7 @@ define <4 x float> @fun28(<4 x float> %v
 ; CHECK-NEXT:    br %r14
 
 ; CHECK-Z14-LABEL: fun28:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb  %v0, %v24, %v26
 ; CHECK-Z14-NEXT:    vsel    %v24, %v28, %v30, %v0
 ; CHECK-Z14-NEXT:    br %r14
@@ -416,7 +416,7 @@ define <4 x float> @fun28(<4 x float> %v
 
 define <4 x double> @fun29(<4 x float> %val1, <4 x float> %val2, <4 x double> %val3, <4 x double> %val4) {
 ; CHECK-LABEL: fun29:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmrlf %v0, %v26, %v26
 ; CHECK-NEXT:    vmrlf %v1, %v24, %v24
 ; CHECK-NEXT:    vldeb %v0, %v0
@@ -436,7 +436,7 @@ define <4 x double> @fun29(<4 x float> %
 ; CHECK-NEXT:    br %r14
 
 ; CHECK-Z14-LABEL: fun29:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-NEXT:    vfchsb  %v0, %v24, %v26
 ; CHECK-Z14-DAG:     vuphf   [[REG0:%v[0-9]+]], %v0
 ; CHECK-Z14-DAG:     vmrlg   [[REG1:%v[0-9]+]], %v0, %v0
@@ -452,7 +452,7 @@ define <4 x double> @fun29(<4 x float> %
 
 define <8 x float> @fun30(<8 x float> %val1, <8 x float> %val2, <8 x float> %val3, <8 x float> %val4) {
 ; CHECK-Z14-LABEL: fun30:
-; CHECK-Z14:       # BB#0:
+; CHECK-Z14:       # %bb.0:
 ; CHECK-Z14-DAG:     vfchsb  [[REG0:%v[0-9]+]], %v26, %v30
 ; CHECK-Z14-DAG:     vfchsb  [[REG1:%v[0-9]+]], %v24, %v28
 ; CHECK-Z14-DAG:     vsel    %v24, %v25, %v29, [[REG1]]
@@ -465,7 +465,7 @@ define <8 x float> @fun30(<8 x float> %v
 
 define <2 x float> @fun31(<2 x double> %val1, <2 x double> %val2, <2 x float> %val3, <2 x float> %val4) {
 ; CHECK-LABEL: fun31:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfchdb %v0, %v24, %v26
 ; CHECK-NEXT:    vpkg %v0, %v0, %v0
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
@@ -478,7 +478,7 @@ define <2 x float> @fun31(<2 x double> %
 
 define <2 x double> @fun32(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4) {
 ; CHECK-LABEL: fun32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfchdb %v0, %v24, %v26
 ; CHECK-NEXT:    vsel %v24, %v28, %v30, %v0
 ; CHECK-NEXT:    br %r14
@@ -489,7 +489,7 @@ define <2 x double> @fun32(<2 x double>
 
 define <4 x float> @fun33(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4) {
 ; CHECK-LABEL: fun33:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vfchdb %v0, %v26, %v30
 ; CHECK-NEXT:    vfchdb %v1, %v24, %v28
 ; CHECK-NEXT:    vpkg %v0, %v1, %v0
@@ -502,7 +502,7 @@ define <4 x float> @fun33(<4 x double> %
 
 define <4 x double> @fun34(<4 x double> %val1, <4 x double> %val2, <4 x double> %val3, <4 x double> %val4) {
 ; CHECK-LABEL: fun34:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-DAG:     vfchdb [[REG0:%v[0-9]+]], %v26, %v30
 ; CHECK-DAG:     vfchdb [[REG1:%v[0-9]+]], %v24, %v28
 ; CHECK-DAG:     vsel %v24, %v25, %v29, [[REG1]]

Modified: llvm/trunk/test/CodeGen/SystemZ/vec-trunc-to-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/vec-trunc-to-i1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/vec-trunc-to-i1.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/vec-trunc-to-i1.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @pr32275(<4 x i8> %B15) {
 ; CHECK-LABEL: pr32275:
-; CHECK:       # BB#0: # %BB
+; CHECK:       # %bb.0: # %BB
 ; CHECK-NEXT:    vrepif %v0, 1
 ; CHECK-NEXT:  .LBB0_1: # %CF34
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -22,7 +22,7 @@ define void @pr32275(<4 x i8> %B15) {
 ; CHECK-NEXT:    vlgvf %r0, %v1, 3
 ; CHECK-NEXT:    tmll %r0, 1
 ; CHECK-NEXT:    jne .LBB0_1
-; CHECK-NEXT:  # BB#2: # %CF36
+; CHECK-NEXT:  # %bb.2: # %CF36
 ; CHECK-NEXT:    br %r14
 BB:
   br label %CF34

Modified: llvm/trunk/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll Mon Dec  4 09:18:51 2017
@@ -13,7 +13,7 @@ declare void @_ZNSsC1EPKcRKSaIcE() unnam
 ; It isn't valid to If-Convert the following function, even though the calls
 ; are in common. The calls clobber the predicate info.
 ; CHECK: cbnz r{{[0-9]+}}, .LBB0_2
-; CHECK: BB#1
+; CHECK: %bb.1
 ; CHECK: .LBB0_2
 ; Function Attrs: nounwind
 define hidden void @_ZN4llvm14DOTGraphTraitsIPNS_13ScheduleDAGMIEE17getEdgeAttributesEPKNS_5SUnitENS_13SUnitIteratorEPKNS_11ScheduleDAGE() #0 align 2 {

Modified: llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll Mon Dec  4 09:18:51 2017
@@ -1,8 +1,8 @@
 ; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
 
-; CHECK: BB#0
+; CHECK: %bb.0
 ; CHECK: #DEBUG_VALUE: usage:self <- %4
-; CHECK: BB#1
+; CHECK: %bb.1
 ; CHECK: DW_TAG_variable
 source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"

Modified: llvm/trunk/test/CodeGen/WebAssembly/signext-arg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/signext-arg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/signext-arg.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/signext-arg.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@ declare i32 @get_int(i16 %arg)
 
 define i32 @func_1(i16 %arg1 , i32 %arg2) #0 {
 ; CHECK-LABEL: func_1:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    i32.const $push1=, 16
 ; CHECK-NEXT:    i32.shl $push2=, $0, $pop1
 ; CHECK-NEXT:    i32.const $push4=, 16

Modified: llvm/trunk/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll Mon Dec  4 09:18:51 2017
@@ -10,7 +10,7 @@ target triple = "i686-unknown-unknown"
 
 define i32 @test5(i32 %B, i8 %C) {
 ; CHECK-LABEL: test5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movl A, %eax

Modified: llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @f(i32 %a, i32 %b) {
 ; CHECK-LABEL: f:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl %ecx, %edx

Modified: llvm/trunk/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-14-BitMiscompile.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2008-02-14-BitMiscompile.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2008-02-14-BitMiscompile.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test(i1 %A) {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    negl %eax

Modified: llvm/trunk/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll Mon Dec  4 09:18:51 2017
@@ -11,7 +11,7 @@ declare %0 @llvm.sadd.with.overflow.i32(
 define fastcc i32 @test() nounwind {
 entry:
 ; CHECK-LABEL: test:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    addl $0, %eax
 ; CHECK-NEXT:    seto %cl

Modified: llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll Mon Dec  4 09:18:51 2017
@@ -3,18 +3,18 @@ target datalayout = "e-p:64:64:64-i1:8:8
 target triple = "x86_64-apple-darwin"
 
 ; This test causes a virtual FP register to be redefined while it is live:
-;BB#5: derived from LLVM BB %bb10
-;    Predecessors according to CFG: BB#4 BB#5
+;%bb.5: derived from LLVM BB %bb10
+;    Predecessors according to CFG: %bb.4 %bb.5
 ;	%reg1024<def> = MOV_Fp8080 %reg1034
 ;	%reg1025<def> = MUL_Fp80m32 %reg1024, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
 ;	%reg1034<def> = MOV_Fp8080 %reg1025
 ;	FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
-;	JMP_4 <BB#5>
-;    Successors according to CFG: BB#5
+;	JMP_4 <%bb.5>
+;    Successors according to CFG: %bb.5
 ;
 ; The X86FP pass needs good kill flags, like on %fp0 representing %reg1034:
-;BB#5: derived from LLVM BB %bb10
-;    Predecessors according to CFG: BB#4 BB#5
+;%bb.5: derived from LLVM BB %bb10
+;    Predecessors according to CFG: %bb.4 %bb.5
 ;	%fp0<def> = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
 ;	%fp1<def> = MOV_Fp8080 %fp0<kill>
 ;	%fp2<def> = MUL_Fp80m32 %fp1, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
@@ -23,8 +23,8 @@ target triple = "x86_64-apple-darwin"
 ;	ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, %fp1<kill>; mem:ST10[FixedStack4](align=4)
 ;	ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, %fp2<kill>; mem:ST10[FixedStack5](align=4)
 ;	FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
-;	JMP_4 <BB#5>
-;    Successors according to CFG: BB#5
+;	JMP_4 <%bb.5>
+;    Successors according to CFG: %bb.5
 
 define fastcc i32 @sqlite3AtoF(i8* %z, double* nocapture %pResult) nounwind ssp {
 entry:

Modified: llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll Mon Dec  4 09:18:51 2017
@@ -8,14 +8,14 @@
 
 define i32 @main() nounwind {
 ; CHECK-LABEL: main:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpq {{.*}}(%rip), %rax
 ; CHECK-NEXT:    sbbl %eax, %eax
 ; CHECK-NEXT:    andl $150, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jle .LBB0_1
-; CHECK-NEXT:  # BB#2: # %if.then
+; CHECK-NEXT:  # %bb.2: # %if.then
 ; CHECK-NEXT:    movl $1, {{.*}}(%rip)
 ; CHECK-NEXT:    movl $1, %esi
 ; CHECK-NEXT:    jmp .LBB0_3

Modified: llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll Mon Dec  4 09:18:51 2017
@@ -7,13 +7,13 @@
 
 define void @simple_widen(<2 x float> %a, <2 x float> %b) {
 ; X32-LABEL: simple_widen:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    extractps $1, %xmm1, (%eax)
 ; X32-NEXT:    movss %xmm1, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: simple_widen:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movlps %xmm1, (%rax)
 ; X64-NEXT:    retq
 entry:
@@ -24,7 +24,7 @@ entry:
 
 define void @complex_inreg_work(<2 x float> %a, <2 x float> %b) {
 ; X32-LABEL: complex_inreg_work:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movaps %xmm0, %xmm2
 ; X32-NEXT:    cmpordps %xmm0, %xmm0
 ; X32-NEXT:    blendvps %xmm0, %xmm2, %xmm1
@@ -33,7 +33,7 @@ define void @complex_inreg_work(<2 x flo
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: complex_inreg_work:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movaps %xmm0, %xmm2
 ; X64-NEXT:    cmpordps %xmm0, %xmm0
 ; X64-NEXT:    blendvps %xmm0, %xmm2, %xmm1
@@ -48,14 +48,14 @@ entry:
 
 define void @zero_test() {
 ; X32-LABEL: zero_test:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    xorps %xmm0, %xmm0
 ; X32-NEXT:    extractps $1, %xmm0, (%eax)
 ; X32-NEXT:    movss %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: zero_test:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    xorps %xmm0, %xmm0
 ; X64-NEXT:    movlps %xmm0, (%rax)
 ; X64-NEXT:    retq
@@ -67,7 +67,7 @@ entry:
 
 define void @full_test() {
 ; X32-LABEL: full_test:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    subl $60, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 64
 ; X32-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
@@ -91,7 +91,7 @@ define void @full_test() {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: full_test:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
 ; X64-NEXT:    cvttps2dq %xmm2, %xmm0
 ; X64-NEXT:    cvtdq2ps %xmm0, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/2011-10-21-widen-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-10-21-widen-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-10-21-widen-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-10-21-widen-cmp.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @cmp_2_floats(<2 x float> %a, <2 x float> %b) {
 ; CHECK-LABEL: cmp_2_floats:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movaps %xmm0, %xmm2
 ; CHECK-NEXT:    cmpordps %xmm0, %xmm0
 ; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
@@ -21,7 +21,7 @@ entry:
 
 define void @cmp_2_doubles(<2 x double> %a, <2 x double> %b) {
 ; CHECK-LABEL: cmp_2_doubles:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movapd %xmm0, %xmm2
 ; CHECK-NEXT:    cmpordpd %xmm0, %xmm0
 ; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
@@ -36,7 +36,7 @@ entry:
 
 define void @mp_11193(<8 x float> * nocapture %aFOO, <8 x float>* nocapture %RET) nounwind {
 ; CHECK-LABEL: mp_11193:
-; CHECK:       # BB#0: # %allocas
+; CHECK:       # %bb.0: # %allocas
 ; CHECK-NEXT:    movl $-1082130432, (%rsi) # imm = 0xBF800000
 ; CHECK-NEXT:    retq
 allocas:

Modified: llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll Mon Dec  4 09:18:51 2017
@@ -9,12 +9,12 @@
 
 define <4 x i32> @test(<4 x i32>* %p) {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movaps (%rdi), %xmm0
 ; CHECK-NEXT:    extractps $2, %xmm0, %eax
 ; CHECK-NEXT:    cmpl $3, %eax
 ; CHECK-NEXT:    je .LBB0_2
-; CHECK-NEXT:  # BB#1:
+; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:  .LBB0_2:
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-12-8-bitcastintprom.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-12-8-bitcastintprom.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-12-8-bitcastintprom.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 ; Make sure that the conversion from v4i8 to v2i16 is not a simple bitcast.
 define void @prom_bug(<4 x i8> %t, i16* %p) {
 ; SSE2-LABEL: prom_bug:
-; SSE2:       ## BB#0:
+; SSE2:       ## %bb.0:
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
 ; SSE2-NEXT:    packuswb %xmm0, %xmm0
@@ -16,7 +16,7 @@ define void @prom_bug(<4 x i8> %t, i16*
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: prom_bug:
-; SSE41:       ## BB#0:
+; SSE41:       ## %bb.0:
 ; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; SSE41-NEXT:    pextrw $0, %xmm0, (%rdi)
 ; SSE41-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-20-21-zext-ui2fp.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @ui_to_fp_conv(<8 x float> * nocapture %aFOO, <8 x float>* nocapture %RET) nounwind {
 ; CHECK-LABEL: ui_to_fp_conv:
-; CHECK:       # BB#0: # %allocas
+; CHECK:       # %bb.0: # %allocas
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,0.000000e+00,0.000000e+00]
 ; CHECK-NEXT:    xorps %xmm1, %xmm1
 ; CHECK-NEXT:    movups %xmm1, 16(%rsi)

Modified: llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
 ; CHECK-LABEL: add18i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    vmovups (%ecx), %ymm0

Modified: llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 
 define void @endless_loop() {
 ; CHECK-LABEL: endless_loop:
-; CHECK-NEXT:  # BB#0:
+; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    vmovaps (%eax), %ymm0
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]

Modified: llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define void @func() nounwind ssp {
 ; CHECK-LABEL: func:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovups 0, %xmm0
 ; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]

Modified: llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @load_store(<4 x i16>* %in) {
 ; CHECK-LABEL: load_store:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; CHECK-NEXT:    paddw %xmm0, %xmm0
@@ -20,7 +20,7 @@ entry:
 ; Make sure that we store a 64bit value, even on 32bit systems.
 define void @store_64(<2 x i32>* %ptr) {
 ; CHECK-LABEL: store_64:
-; CHECK:       # BB#0: # %BB
+; CHECK:       # %bb.0: # %BB
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    movlps %xmm0, (%eax)
@@ -32,7 +32,7 @@ BB:
 
 define <2 x i32> @load_64(<2 x i32>* %ptr) {
 ; CHECK-LABEL: load_64:
-; CHECK:       # BB#0: # %BB
+; CHECK:       # %bb.0: # %BB
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/2012-08-16-setcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-08-16-setcc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-08-16-setcc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-08-16-setcc.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define i32 @and_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
 ; CHECK-LABEL: and_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    andb %dil, %sil
 ; CHECK-NEXT:    cmovnel %edx, %eax
@@ -18,7 +18,7 @@ define i32 @and_1(i8 zeroext %a, i8 zero
 
 define zeroext i1 @and_2(i8 zeroext %a, i8 zeroext %b) {
 ; CHECK-LABEL: and_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andb %dil, %sil
 ; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    retq
@@ -29,7 +29,7 @@ define zeroext i1 @and_2(i8 zeroext %a,
 
 define i32 @xor_1(i8 zeroext %a, i8 zeroext %b, i32 %x) {
 ; CHECK-LABEL: xor_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorb %dil, %sil
 ; CHECK-NEXT:    cmovnel %edx, %eax
@@ -42,7 +42,7 @@ define i32 @xor_1(i8 zeroext %a, i8 zero
 
 define zeroext i1 @xor_2(i8 zeroext %a, i8 zeroext %b) {
 ; CHECK-LABEL: xor_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorb %dil, %sil
 ; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/2012-1-10-buildvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-1-10-buildvector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-1-10-buildvector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-1-10-buildvector.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @bad_cast() {
 ; CHECK-LABEL: bad_cast:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovaps %xmm0, (%eax)
 ; CHECK-NEXT:    movl $0, (%eax)
@@ -16,7 +16,7 @@ define void @bad_cast() {
 
 define void @bad_insert(i32 %t) {
 ; CHECK-LABEL: bad_insert:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vmovaps %ymm0, (%eax)
 ; CHECK-NEXT:    vzeroupper

Modified: llvm/trunk/test/CodeGen/X86/2012-12-1-merge-multiple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-12-1-merge-multiple.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-12-1-merge-multiple.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-12-1-merge-multiple.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @multiple_stores_on_chain(i16 * %A) {
 ; CHECK-LABEL: multiple_stores_on_chain:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movabsq $844433520132096, %rax # imm = 0x3000200010000
 ; CHECK-NEXT:    movq %rax, (%rdi)
 ; CHECK-NEXT:    movabsq $1970350607106052, %rax # imm = 0x7000600050004

Modified: llvm/trunk/test/CodeGen/X86/3dnow-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/3dnow-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/3dnow-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/3dnow-schedule.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @test_femms() optsize {
 ; CHECK-LABEL: test_femms:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    femms
 ; CHECK-NEXT:    retq # sched: [1:1.00]
   call void @llvm.x86.mmx.femms()
@@ -13,7 +13,7 @@ declare void @llvm.x86.mmx.femms() nounw
 
 define i64 @test_pavgusb(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pavgusb:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pavgusb %mm1, %mm0 # sched: [5:1.00]
 ; CHECK-NEXT:    pavgusb (%rdi), %mm0 # sched: [9:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -28,7 +28,7 @@ declare x86_mmx @llvm.x86.3dnow.pavgusb(
 
 define i64 @test_pf2id(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pf2id:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pf2id (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    pf2id %mm0, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -43,7 +43,7 @@ declare x86_mmx @llvm.x86.3dnow.pf2id(x8
 
 define i64 @test_pf2iw(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pf2iw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pf2iw (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    pf2iw %mm0, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -58,7 +58,7 @@ declare x86_mmx @llvm.x86.3dnowa.pf2iw(x
 
 define i64 @test_pfacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfacc:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfacc %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfacc (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -73,7 +73,7 @@ declare x86_mmx @llvm.x86.3dnow.pfacc(x8
 
 define i64 @test_pfadd(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfadd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfadd %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfadd (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -88,7 +88,7 @@ declare x86_mmx @llvm.x86.3dnow.pfadd(x8
 
 define i64 @test_pfcmpeq(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfcmpeq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfcmpeq %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfcmpeq (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -103,7 +103,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpeq(
 
 define i64 @test_pfcmpge(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfcmpge:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfcmpge %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfcmpge (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -118,7 +118,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpge(
 
 define i64 @test_pfcmpgt(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfcmpgt:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfcmpgt %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfcmpgt (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -133,7 +133,7 @@ declare x86_mmx @llvm.x86.3dnow.pfcmpgt(
 
 define i64 @test_pfmax(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfmax:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfmax %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfmax (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -148,7 +148,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmax(x8
 
 define i64 @test_pfmin(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfmin:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfmin %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfmin (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -163,7 +163,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmin(x8
 
 define i64 @test_pfmul(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfmul:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfmul %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfmul (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -178,7 +178,7 @@ declare x86_mmx @llvm.x86.3dnow.pfmul(x8
 
 define i64 @test_pfnacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfnacc:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfnacc %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfnacc (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -193,7 +193,7 @@ declare x86_mmx @llvm.x86.3dnowa.pfnacc(
 
 define i64 @test_pfpnacc(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfpnacc:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfpnacc %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfpnacc (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -208,7 +208,7 @@ declare x86_mmx @llvm.x86.3dnowa.pfpnacc
 
 define i64 @test_pfrcp(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pfrcp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfrcp (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    pfrcp %mm0, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -223,7 +223,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrcp(x8
 
 define i64 @test_pfrcpit1(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfrcpit1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfrcpit1 %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfrcpit1 (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -238,7 +238,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrcpit1
 
 define i64 @test_pfrcpit2(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfrcpit2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfrcpit2 %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfrcpit2 (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -253,7 +253,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrcpit2
 
 define i64 @test_pfrsqit1(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfrsqit1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfrsqit1 %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfrsqit1 (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -268,7 +268,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrsqit1
 
 define i64 @test_pfrsqrt(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pfrsqrt:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfrsqrt (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    pfrsqrt %mm0, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -283,7 +283,7 @@ declare x86_mmx @llvm.x86.3dnow.pfrsqrt(
 
 define i64 @test_pfsub(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfsub:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfsub %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfsub (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -298,7 +298,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsub(x8
 
 define i64 @test_pfsubr(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pfsubr:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pfsubr %mm1, %mm0 # sched: [3:1.00]
 ; CHECK-NEXT:    pfsubr (%rdi), %mm0 # sched: [7:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -313,7 +313,7 @@ declare x86_mmx @llvm.x86.3dnow.pfsubr(x
 
 define i64 @test_pi2fd(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pi2fd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pi2fd (%rdi), %mm0 # sched: [8:1.00]
 ; CHECK-NEXT:    pi2fd %mm0, %mm0 # sched: [4:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -328,7 +328,7 @@ declare x86_mmx @llvm.x86.3dnow.pi2fd(x8
 
 define i64 @test_pi2fw(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pi2fw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pi2fw (%rdi), %mm0 # sched: [8:1.00]
 ; CHECK-NEXT:    pi2fw %mm0, %mm0 # sched: [4:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -343,7 +343,7 @@ declare x86_mmx @llvm.x86.3dnowa.pi2fw(x
 
 define i64 @test_pmulhrw(x86_mmx %a0, x86_mmx %a1, x86_mmx* %a2) optsize {
 ; CHECK-LABEL: test_pmulhrw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pmulhrw %mm1, %mm0 # sched: [5:1.00]
 ; CHECK-NEXT:    pmulhrw (%rdi), %mm0 # sched: [9:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]
@@ -358,7 +358,7 @@ declare x86_mmx @llvm.x86.3dnow.pmulhrw(
 
 define void @test_prefetch(i8* %a0) optsize {
 ; CHECK-LABEL: test_prefetch:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    prefetch (%rdi) # sched: [5:0.50]
 ; CHECK-NEXT:    #NO_APP
@@ -369,7 +369,7 @@ define void @test_prefetch(i8* %a0) opts
 
 define void @test_prefetchw(i8* %a0) optsize {
 ; CHECK-LABEL: test_prefetchw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    prefetchw (%rdi) # sched: [5:0.50]
 ; CHECK-NEXT:    #NO_APP
@@ -380,7 +380,7 @@ define void @test_prefetchw(i8* %a0) opt
 
 define i64 @test_pswapd(x86_mmx* %a0) optsize {
 ; CHECK-LABEL: test_pswapd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pswapd (%rdi), %mm0 # mm0 = mem[1,0] sched: [5:1.00]
 ; CHECK-NEXT:    pswapd %mm0, %mm0 # mm0 = mm0[1,0] sched: [1:1.00]
 ; CHECK-NEXT:    movd %mm0, %rax # sched: [1:0.33]

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/GV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/GV.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/GV.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/GV.ll Mon Dec  4 09:18:51 2017
@@ -9,22 +9,22 @@
 ; Function Attrs: noinline nounwind optnone uwtable
 define i32* @test_global_ptrv() #3 {
 ; X64-LABEL: test_global_ptrv:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    leaq g_int, %rax
 ; X64-NEXT:    retq
 ;
 ; X64_DARWIN_PIC-LABEL: test_global_ptrv:
-; X64_DARWIN_PIC:       ## BB#0: ## %entry
+; X64_DARWIN_PIC:       ## %bb.0: ## %entry
 ; X64_DARWIN_PIC-NEXT:    leaq _g_int(%rip), %rax
 ; X64_DARWIN_PIC-NEXT:    retq
 ;
 ; X32-LABEL: test_global_ptrv:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    leal g_int, %eax
 ; X32-NEXT:    retl
 ;
 ; X32ABI-LABEL: test_global_ptrv:
-; X32ABI:       # BB#0: # %entry
+; X32ABI:       # %bb.0: # %entry
 ; X32ABI-NEXT:    leal g_int, %eax
 ; X32ABI-NEXT:    retq
 entry:
@@ -34,25 +34,25 @@ entry:
 ; Function Attrs: noinline nounwind optnone uwtable
 define i32 @test_global_valv() #3 {
 ; X64-LABEL: test_global_valv:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    leaq g_int, %rax
 ; X64-NEXT:    movl (%rax), %eax
 ; X64-NEXT:    retq
 ;
 ; X64_DARWIN_PIC-LABEL: test_global_valv:
-; X64_DARWIN_PIC:       ## BB#0: ## %entry
+; X64_DARWIN_PIC:       ## %bb.0: ## %entry
 ; X64_DARWIN_PIC-NEXT:    leaq _g_int(%rip), %rax
 ; X64_DARWIN_PIC-NEXT:    movl (%rax), %eax
 ; X64_DARWIN_PIC-NEXT:    retq
 ;
 ; X32-LABEL: test_global_valv:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    leal g_int, %eax
 ; X32-NEXT:    movl (%eax), %eax
 ; X32-NEXT:    retl
 ;
 ; X32ABI-LABEL: test_global_valv:
-; X32ABI:       # BB#0: # %entry
+; X32ABI:       # %bb.0: # %entry
 ; X32ABI-NEXT:    leal g_int, %eax
 ; X32ABI-NEXT:    movl (%eax), %eax
 ; X32ABI-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
 ; X64-LABEL: test_add_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq (%rsi,%rdi), %rax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_add_i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebp
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    .cfi_offset %ebp, -8
@@ -27,14 +27,14 @@ define i64 @test_add_i64(i64 %arg1, i64
 
 define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
 ; X64-LABEL: test_add_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    leal (%rsi,%rdi), %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_add_i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
@@ -44,7 +44,7 @@ define i32 @test_add_i32(i32 %arg1, i32
 
 define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
 ; X64-LABEL: test_add_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    leal (%rsi,%rdi), %eax
@@ -52,7 +52,7 @@ define i16 @test_add_i16(i16 %arg1, i16
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_add_i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addw {{[0-9]+}}(%esp), %ax
 ; X32-NEXT:    retl
@@ -62,13 +62,13 @@ define i16 @test_add_i16(i16 %arg1, i16
 
 define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
 ; X64-LABEL: test_add_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    addb %dil, %sil
 ; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_add_i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    addb {{[0-9]+}}(%esp), %al
 ; X32-NEXT:    retl
@@ -78,7 +78,7 @@ define i8 @test_add_i8(i8 %arg1, i8 %arg
 
 define i32 @test_add_i1(i32 %arg1, i32 %arg2) {
 ; X64-LABEL: test_add_i1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl %esi, %edi
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    addb %al, %al
@@ -87,7 +87,7 @@ define i32 @test_add_i1(i32 %arg1, i32 %
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_add_i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    cmpl %eax, {{[0-9]+}}(%esp)
 ; X32-NEXT:    sete %al

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/add-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/add-vec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/add-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/add-vec.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; ALL-LABEL: test_add_v16i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %ret = add <16 x i8> %arg1, %arg2
@@ -15,7 +15,7 @@ define <16 x i8> @test_add_v16i8(<16 x i
 
 define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; ALL-LABEL: test_add_v8i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %ret = add <8 x i16> %arg1, %arg2
@@ -24,7 +24,7 @@ define <8 x i16> @test_add_v8i16(<8 x i1
 
 define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; ALL-LABEL: test_add_v4i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %ret = add <4 x i32> %arg1, %arg2
@@ -33,7 +33,7 @@ define <4 x i32> @test_add_v4i32(<4 x i3
 
 define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
 ; ALL-LABEL: test_add_v2i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %ret = add <2 x i64> %arg1, %arg2
@@ -42,17 +42,17 @@ define <2 x i64> @test_add_v2i64(<2 x i6
 
 define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
 ; SKX-LABEL: test_add_v32i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v32i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
@@ -65,17 +65,17 @@ define <32 x i8> @test_add_v32i8(<32 x i
 
 define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
 ; SKX-LABEL: test_add_v16i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v16i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
@@ -88,17 +88,17 @@ define <16 x i16> @test_add_v16i16(<16 x
 
 define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
 ; SKX-LABEL: test_add_v8i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v8i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v8i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
@@ -111,17 +111,17 @@ define <8 x i32> @test_add_v8i32(<8 x i3
 
 define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
 ; SKX-LABEL: test_add_v4i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v4i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v4i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
@@ -134,18 +134,18 @@ define <4 x i64> @test_add_v4i64(<4 x i6
 
 define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
 ; SKX-LABEL: test_add_v64i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v64i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v64i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
@@ -163,18 +163,18 @@ define <64 x i8> @test_add_v64i8(<64 x i
 
 define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
 ; SKX-LABEL: test_add_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddw %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v32i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v32i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
@@ -192,18 +192,18 @@ define <32 x i16> @test_add_v32i16(<32 x
 
 define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
 ; SKX-LABEL: test_add_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v16i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v16i32:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
@@ -221,18 +221,18 @@ define <16 x i32> @test_add_v16i32(<16 x
 
 define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
 ; SKX-LABEL: test_add_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; AVX2-LABEL: test_add_v8i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpaddq %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_add_v8i64:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/and-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/and-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/and-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/and-scalar.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test_and_i1(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_and_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    andb %al, %al
@@ -18,7 +18,7 @@ define i32 @test_and_i1(i32 %arg1, i32 %
 
 define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
 ; ALL-LABEL: test_and_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andb %dil, %sil
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -28,7 +28,7 @@ define i8 @test_and_i8(i8 %arg1, i8 %arg
 
 define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
 ; ALL-LABEL: test_and_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andw %di, %si
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -38,7 +38,7 @@ define i16 @test_and_i16(i16 %arg1, i16
 
 define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_and_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andl %edi, %esi
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -48,7 +48,7 @@ define i32 @test_and_i32(i32 %arg1, i32
 
 define i64 @test_and_i64(i64 %arg1, i64 %arg2) {
 ; ALL-LABEL: test_and_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andq %rdi, %rsi
 ; ALL-NEXT:    movq %rsi, %rax
 ; ALL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/binop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/binop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/binop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/binop.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
 ; ALL-LABEL: test_sub_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    subq %rsi, %rdi
 ; ALL-NEXT:    movq %rdi, %rax
 ; ALL-NEXT:    retq
@@ -16,7 +16,7 @@ define i64 @test_sub_i64(i64 %arg1, i64
 
 define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_sub_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    subl %esi, %edi
 ; ALL-NEXT:    movl %edi, %eax
 ; ALL-NEXT:    retq
@@ -26,12 +26,12 @@ define i32 @test_sub_i32(i32 %arg1, i32
 
 define float @test_add_float(float %arg1, float %arg2) {
 ; SSE-LABEL: test_add_float:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_add_float:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = fadd float %arg1, %arg2
@@ -40,12 +40,12 @@ define float @test_add_float(float %arg1
 
 define double @test_add_double(double %arg1, double %arg2) {
 ; SSE-LABEL: test_add_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_add_double:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = fadd double %arg1, %arg2
@@ -54,12 +54,12 @@ define double @test_add_double(double %a
 
 define float @test_sub_float(float %arg1, float %arg2) {
 ; SSE-LABEL: test_sub_float:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_sub_float:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = fsub float %arg1, %arg2
@@ -68,12 +68,12 @@ define float @test_sub_float(float %arg1
 
 define double @test_sub_double(double %arg1, double %arg2) {
 ; SSE-LABEL: test_sub_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_sub_double:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = fsub double %arg1, %arg2
@@ -82,12 +82,12 @@ define double @test_sub_double(double %a
 
 define <4 x i32>  @test_add_v4i32(<4 x i32> %arg1, <4 x i32>  %arg2) {
 ; SSE-LABEL: test_add_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_add_v4i32:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = add <4 x i32>  %arg1, %arg2
@@ -96,12 +96,12 @@ define <4 x i32>  @test_add_v4i32(<4 x i
 
 define <4 x i32>  @test_sub_v4i32(<4 x i32> %arg1, <4 x i32>  %arg2) {
 ; SSE-LABEL: test_sub_v4i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_sub_v4i32:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = sub <4 x i32>  %arg1, %arg2
@@ -110,12 +110,12 @@ define <4 x i32>  @test_sub_v4i32(<4 x i
 
 define <4 x float>  @test_add_v4f32(<4 x float> %arg1, <4 x float>  %arg2) {
 ; SSE-LABEL: test_add_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_add_v4f32:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = fadd <4 x float>  %arg1, %arg2
@@ -124,12 +124,12 @@ define <4 x float>  @test_add_v4f32(<4 x
 
 define <4 x float>  @test_sub_v4f32(<4 x float> %arg1, <4 x float>  %arg2) {
 ; SSE-LABEL: test_sub_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    subps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_sub_v4f32:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
 ; ALL_AVX-NEXT:    retq
   %ret = fsub <4 x float>  %arg1, %arg2
@@ -138,12 +138,12 @@ define <4 x float>  @test_sub_v4f32(<4 x
 
 define i32  @test_copy_float(float %val) {
 ; SSE-LABEL: test_copy_float:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_copy_float:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vmovd %xmm0, %eax
 ; ALL_AVX-NEXT:    retq
   %r = bitcast float %val to i32
@@ -152,12 +152,12 @@ define i32  @test_copy_float(float %val)
 
 define float  @test_copy_i32(i32 %val) {
 ; SSE-LABEL: test_copy_i32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %edi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL_AVX-LABEL: test_copy_i32:
-; ALL_AVX:       # BB#0:
+; ALL_AVX:       # %bb.0:
 ; ALL_AVX-NEXT:    vmovd %edi, %xmm0
 ; ALL_AVX-NEXT:    retq
   %r = bitcast i32 %val to float

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/br.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/br.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/br.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/br.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @uncondbr() {
 ; CHECK-LABEL: uncondbr:
-; CHECK:       # BB#1: # %entry
+; CHECK:       # %bb.1: # %entry
 ; CHECK-NEXT:    jmp .LBB0_3
 ; CHECK-NEXT:  .LBB0_2: # %end
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/brcond.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/brcond.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/brcond.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/brcond.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) {
 ; X64-LABEL: test_1:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    cmpl %esi, %edi
 ; X64-NEXT:    setl %al
 ; X64-NEXT:    testb $1, %al
 ; X64-NEXT:    je .LBB0_2
-; X64-NEXT:  # BB#1: # %if.then
+; X64-NEXT:  # %bb.1: # %if.then
 ; X64-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    retq
@@ -19,7 +19,7 @@ define i32 @test_1(i32 %a, i32 %b, i32 %
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -27,7 +27,7 @@ define i32 @test_1(i32 %a, i32 %b, i32 %
 ; X32-NEXT:    setl %al
 ; X32-NEXT:    testb $1, %al
 ; X32-NEXT:    je .LBB0_2
-; X32-NEXT:  # BB#1: # %if.then
+; X32-NEXT:  # %bb.1: # %if.then
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    jmp .LBB0_3
 ; X32-NEXT:  .LBB0_2: # %if.else
@@ -57,10 +57,10 @@ return:
 
 define i32 @test_2(i32 %a) {
 ; X64-LABEL: test_2:
-; X64:       # BB#0: # %entry
+; X64:       # %bb.0: # %entry
 ; X64-NEXT:    testb $1, %dil
 ; X64-NEXT:    je .LBB1_2
-; X64-NEXT:  # BB#1: # %if.then
+; X64-NEXT:  # %bb.1: # %if.then
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB1_2: # %if.else
@@ -68,11 +68,11 @@ define i32 @test_2(i32 %a) {
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    testb $1, %al
 ; X32-NEXT:    je .LBB1_2
-; X32-NEXT:  # BB#1: # %if.then
+; X32-NEXT:  # %bb.1: # %if.then
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    retl
 ; X32-NEXT:  .LBB1_2: # %if.else

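The brcond.ll hunks above show the same renaming on blocks in the middle of a
function: "# BB#1: # %if.then" becomes "# %bb.1: # %if.then", where %bb.1 is the
machine-basic-block number in the new form and the trailing "# %if.then"
comment, the originating IR block name, is untouched. A small illustrative test
checking that shape (the function and its label numbering are hypothetical, not
from this commit):

  ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel %s -o - | FileCheck %s
  define i32 @pick(i1 %c, i32 %t, i32 %f) {
  ; CHECK-LABEL: pick:
  ; CHECK:       # %bb.0: # %entry
  ; CHECK:         je .LBB0_2
  ; CHECK:       # %bb.1: # %if.then
  entry:
    br i1 %c, label %if.then, label %if.else
  if.then:
    ret i32 %t
  if.else:
    ret i32 %f
  }
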
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define i32 @test_ret_i32() {
 ; X32-LABEL: test_ret_i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl $20, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_ret_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl $20, %eax
 ; X64-NEXT:    retq
   ret i32 20
@@ -17,13 +17,13 @@ define i32 @test_ret_i32() {
 
 define i64 @test_ret_i64() {
 ; X32-LABEL: test_ret_i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl $4294967295, %eax # imm = 0xFFFFFFFF
 ; X32-NEXT:    movl $15, %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_ret_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movabsq $68719476735, %rax # imm = 0xFFFFFFFFF
 ; X64-NEXT:    retq
   ret i64 68719476735
@@ -31,12 +31,12 @@ define i64 @test_ret_i64() {
 
 define i8 @test_arg_i8(i8 %a) {
 ; X32-LABEL: test_arg_i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movb 4(%esp), %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
   ret i8 %a
@@ -44,12 +44,12 @@ define i8 @test_arg_i8(i8 %a) {
 
 define i16 @test_arg_i16(i16 %a) {
 ; X32-LABEL: test_arg_i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl 4(%esp), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
   ret i16 %a
@@ -57,12 +57,12 @@ define i16 @test_arg_i16(i16 %a) {
 
 define i32 @test_arg_i32(i32 %a) {
 ; X32-LABEL: test_arg_i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl 4(%esp), %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
   ret i32 %a
@@ -70,13 +70,13 @@ define i32 @test_arg_i32(i32 %a) {
 
 define i64 @test_arg_i64(i64 %a) {
 ; X32-LABEL: test_arg_i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl 4(%esp), %eax
 ; X32-NEXT:    movl 8(%esp), %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    retq
   ret i64 %a
@@ -84,13 +84,13 @@ define i64 @test_arg_i64(i64 %a) {
 
 define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
 ; X32-LABEL: test_i64_args_8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl 60(%esp), %eax
 ; X32-NEXT:    movl 64(%esp), %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_i64_args_8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movq 16(%rsp), %rax
 ; X64-NEXT:    retq
   ret i64 %arg8
@@ -98,12 +98,12 @@ define i64 @test_i64_args_8(i64 %arg1, i
 
 define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; X32-LABEL: test_v4i32_args:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movaps %xmm1, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_v4i32_args:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm1, %xmm0
 ; X64-NEXT:    retq
   ret <4 x i32> %arg2
@@ -111,7 +111,7 @@ define <4 x i32> @test_v4i32_args(<4 x i
 
 define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) {
 ; X32-LABEL: test_v8i32_args:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    movups 16(%esp), %xmm1
@@ -120,7 +120,7 @@ define <8 x i32> @test_v8i32_args(<8 x i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_v8i32_args:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movaps %xmm2, %xmm0
 ; X64-NEXT:    movaps %xmm3, %xmm1
 ; X64-NEXT:    retq
@@ -130,7 +130,7 @@ define <8 x i32> @test_v8i32_args(<8 x i
 declare void @trivial_callee()
 define void @test_trivial_call() {
 ; X32-LABEL: test_trivial_call:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    calll trivial_callee
@@ -138,7 +138,7 @@ define void @test_trivial_call() {
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_trivial_call:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    callq trivial_callee
@@ -151,7 +151,7 @@ define void @test_trivial_call() {
 declare void @simple_arg_callee(i32 %in0, i32 %in1)
 define void @test_simple_arg_call(i32 %in0, i32 %in1) {
 ; X32-LABEL: test_simple_arg_call:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    movl 16(%esp), %eax
@@ -163,7 +163,7 @@ define void @test_simple_arg_call(i32 %i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_simple_arg_call:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    movl %edi, %eax
@@ -179,7 +179,7 @@ define void @test_simple_arg_call(i32 %i
 declare void @simple_arg8_callee(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8)
 define void @test_simple_arg8_call(i32 %in0) {
 ; X32-LABEL: test_simple_arg8_call:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $44, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 48
 ; X32-NEXT:    movl 48(%esp), %eax
@@ -196,7 +196,7 @@ define void @test_simple_arg8_call(i32 %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_simple_arg8_call:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subq $24, %rsp
 ; X64-NEXT:    .cfi_def_cfa_offset 32
 ; X64-NEXT:    movl %edi, (%rsp)
@@ -216,7 +216,7 @@ define void @test_simple_arg8_call(i32 %
 declare i32 @simple_return_callee(i32 %in0)
 define i32 @test_simple_return_callee() {
 ; X32-LABEL: test_simple_return_callee:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    movl $5, %eax
@@ -227,7 +227,7 @@ define i32 @test_simple_return_callee()
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_simple_return_callee:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    movl $5, %edi
@@ -243,7 +243,7 @@ define i32 @test_simple_return_callee()
 declare <8 x i32> @split_return_callee(<8 x i32> %in0)
 define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
 ; X32-LABEL: test_split_return_callee:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $44, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 48
 ; X32-NEXT:    movaps %xmm0, (%esp) # 16-byte Spill
@@ -257,7 +257,7 @@ define <8 x i32> @test_split_return_call
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_split_return_callee:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subq $40, %rsp
 ; X64-NEXT:    .cfi_def_cfa_offset 48
 ; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
@@ -276,7 +276,7 @@ define <8 x i32> @test_split_return_call
 
 define void @test_indirect_call(void()* %func) {
 ; X32-LABEL: test_indirect_call:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    calll *16(%esp)
@@ -284,7 +284,7 @@ define void @test_indirect_call(void()*
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_indirect_call:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    callq *%rdi
@@ -297,7 +297,7 @@ define void @test_indirect_call(void()*
 declare void @take_char(i8)
 define void @test_abi_exts_call(i8* %addr) {
 ; X32-LABEL: test_abi_exts_call:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %ebx
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    pushl %esi
@@ -322,7 +322,7 @@ define void @test_abi_exts_call(i8* %add
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_abi_exts_call:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rbx
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    .cfi_offset %rbx, -16
@@ -346,7 +346,7 @@ define void @test_abi_exts_call(i8* %add
 declare void @variadic_callee(i8*, ...)
 define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
 ; X32-LABEL: test_variadic_call_1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    movl 16(%esp), %eax
@@ -360,7 +360,7 @@ define void @test_variadic_call_1(i8** %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_variadic_call_1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    movq (%rdi), %rdi
@@ -378,7 +378,7 @@ define void @test_variadic_call_1(i8** %
 
 define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
 ; X32-LABEL: test_variadic_call_2:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    .cfi_def_cfa_offset 16
 ; X32-NEXT:    movl 16(%esp), %eax
@@ -396,7 +396,7 @@ define void @test_variadic_call_2(i8** %
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_variadic_call_2:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rax
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    movq (%rdi), %rdi

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/cmp.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
 ; ALL-LABEL: test_icmp_eq_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpb %sil, %dil
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    andl $1, %eax
@@ -15,7 +15,7 @@ define i32 @test_icmp_eq_i8(i8 %a, i8 %b
 
 define i32 @test_icmp_eq_i16(i16 %a, i16 %b) {
 ; ALL-LABEL: test_icmp_eq_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpw %si, %di
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    andl $1, %eax
@@ -27,7 +27,7 @@ define i32 @test_icmp_eq_i16(i16 %a, i16
 
 define i32 @test_icmp_eq_i64(i64 %a, i64 %b) {
 ; ALL-LABEL: test_icmp_eq_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpq %rsi, %rdi
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    andl $1, %eax
@@ -39,7 +39,7 @@ define i32 @test_icmp_eq_i64(i64 %a, i64
 
 define i32 @test_icmp_eq_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_eq_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    andl $1, %eax
@@ -51,7 +51,7 @@ define i32 @test_icmp_eq_i32(i32 %a, i32
 
 define i32 @test_icmp_ne_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_ne_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setne %al
 ; ALL-NEXT:    andl $1, %eax
@@ -63,7 +63,7 @@ define i32 @test_icmp_ne_i32(i32 %a, i32
 
 define i32 @test_icmp_ugt_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_ugt_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    seta %al
 ; ALL-NEXT:    andl $1, %eax
@@ -75,7 +75,7 @@ define i32 @test_icmp_ugt_i32(i32 %a, i3
 
 define i32 @test_icmp_uge_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_uge_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setae %al
 ; ALL-NEXT:    andl $1, %eax
@@ -87,7 +87,7 @@ define i32 @test_icmp_uge_i32(i32 %a, i3
 
 define i32 @test_icmp_ult_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_ult_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setb %al
 ; ALL-NEXT:    andl $1, %eax
@@ -99,7 +99,7 @@ define i32 @test_icmp_ult_i32(i32 %a, i3
 
 define i32 @test_icmp_ule_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_ule_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setbe %al
 ; ALL-NEXT:    andl $1, %eax
@@ -111,7 +111,7 @@ define i32 @test_icmp_ule_i32(i32 %a, i3
 
 define i32 @test_icmp_sgt_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_sgt_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    andl $1, %eax
@@ -123,7 +123,7 @@ define i32 @test_icmp_sgt_i32(i32 %a, i3
 
 define i32 @test_icmp_sge_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_sge_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setge %al
 ; ALL-NEXT:    andl $1, %eax
@@ -135,7 +135,7 @@ define i32 @test_icmp_sge_i32(i32 %a, i3
 
 define i32 @test_icmp_slt_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_slt_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setl %al
 ; ALL-NEXT:    andl $1, %eax
@@ -147,7 +147,7 @@ define i32 @test_icmp_slt_i32(i32 %a, i3
 
 define i32 @test_icmp_sle_i32(i32 %a, i32 %b) {
 ; ALL-LABEL: test_icmp_sle_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    setle %al
 ; ALL-NEXT:    andl $1, %eax

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/constant.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/constant.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/constant.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i8 @const_i8() {
 ; ALL-LABEL: const_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movb $2, %al
 ; ALL-NEXT:    retq
   ret i8 2
@@ -11,7 +11,7 @@ define i8 @const_i8() {
 
 define i16 @const_i16() {
 ; ALL-LABEL: const_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movw $3, %ax
 ; ALL-NEXT:    retq
   ret i16 3
@@ -19,7 +19,7 @@ define i16 @const_i16() {
 
 define i32 @const_i32() {
 ; ALL-LABEL: const_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl $4, %eax
 ; ALL-NEXT:    retq
   ret i32 4
@@ -27,7 +27,7 @@ define i32 @const_i32() {
 
 define i64 @const_i64() {
 ; ALL-LABEL: const_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movabsq $68719476720, %rax # imm = 0xFFFFFFFF0
 ; ALL-NEXT:    retq
   ret i64 68719476720
@@ -36,7 +36,7 @@ define i64 @const_i64() {
 ;i64 value fit into u32
 define i64 @const_i64_u32() {
 ; ALL-LABEL: const_i64_u32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq $1879048192, %rax # imm = 0x70000000
 ; ALL-NEXT:    retq
   ret i64 1879048192
@@ -45,7 +45,7 @@ define i64 @const_i64_u32() {
 ;i64 value fit into i32
 define i64 @const_i64_i32() {
 ; ALL-LABEL: const_i64_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq $-1, %rax
 ; ALL-NEXT:    retq
   ret i64 -1
@@ -53,7 +53,7 @@ define i64 @const_i64_i32() {
 
 define void @main(i32 ** %data) {
 ; ALL-LABEL: main:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq $0, %rax
 ; ALL-NEXT:    movq %rax, (%rdi)
 ; ALL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define i64 @test_zext_i1(i8 %a) {
 ; X64-LABEL: test_zext_i1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %edi<def> %edi<kill> %rdi<def>
 ; X64-NEXT:    andq $1, %rdi
 ; X64-NEXT:    movq %rdi, %rax
@@ -17,7 +17,7 @@ define i64 @test_zext_i1(i8 %a) {
 
 define i64 @test_sext_i8(i8 %val) {
 ; X64-LABEL: test_sext_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsbq %dil, %rax
 ; X64-NEXT:    retq
   %r = sext i8 %val to i64
@@ -26,7 +26,7 @@ define i64 @test_sext_i8(i8 %val) {
 
 define i64 @test_sext_i16(i16 %val) {
 ; X64-LABEL: test_sext_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movswq %di, %rax
 ; X64-NEXT:    retq
   %r = sext i16 %val to i64

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define i8 @test_zext_i1toi8(i32 %a) {
 ; X64-LABEL: test_zext_i1toi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_zext_i1toi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
 ; X32-NEXT:    # kill: %al<def> %al<kill> %eax<kill>
@@ -22,13 +22,13 @@ define i8 @test_zext_i1toi8(i32 %a) {
 
 define i16 @test_zext_i1toi16(i32 %a) {
 ; X64-LABEL: test_zext_i1toi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andw $1, %di
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_zext_i1toi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andw $1, %ax
 ; X32-NEXT:    # kill: %ax<def> %ax<kill> %eax<kill>
@@ -40,13 +40,13 @@ define i16 @test_zext_i1toi16(i32 %a) {
 
 define i32 @test_zext_i1(i32 %a) {
 ; X64-LABEL: test_zext_i1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    andl $1, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_zext_i1:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
 ; X32-NEXT:    retl
@@ -57,12 +57,12 @@ define i32 @test_zext_i1(i32 %a) {
 
 define i32 @test_zext_i8(i8 %val) {
 ; X64-LABEL: test_zext_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_zext_i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
   %r = zext i8 %val to i32
@@ -71,12 +71,12 @@ define i32 @test_zext_i8(i8 %val) {
 
 define i32 @test_zext_i16(i16 %val) {
 ; X64-LABEL: test_zext_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl %di, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_zext_i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
   %r = zext i16 %val to i32
@@ -85,12 +85,12 @@ define i32 @test_zext_i16(i16 %val) {
 
 define i32 @test_sext_i8(i8 %val) {
 ; X64-LABEL: test_sext_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movsbl %dil, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_sext_i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
   %r = sext i8 %val to i32
@@ -99,12 +99,12 @@ define i32 @test_sext_i8(i8 %val) {
 
 define i32 @test_sext_i16(i16 %val) {
 ; X64-LABEL: test_sext_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movswl %di, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: test_sext_i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    retl
   %r = sext i16 %val to i32

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/fadd-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/fadd-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/fadd-scalar.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
 define float @test_fadd_float(float %arg1, float %arg2) {
 ; ALL-LABEL: test_fadd_float:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    addss %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fadd float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fadd_float(float %arg
 
 define double @test_fadd_double(double %arg1, double %arg2) {
 ; ALL-LABEL: test_fadd_double:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    addsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fadd double %arg1, %arg2

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/fconstant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/fconstant.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/fconstant.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/fconstant.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @test_float(float* %a , float %b) {
 ; CHECK_SMALL64-LABEL: test_float:
-; CHECK_SMALL64:       # BB#0: # %entry
+; CHECK_SMALL64:       # %bb.0: # %entry
 ; CHECK_SMALL64-NEXT:    movss .LCPI0_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
 ; CHECK_SMALL64-NEXT:    addss %xmm0, %xmm1
 ; CHECK_SMALL64-NEXT:    movd %xmm1, %eax
@@ -15,7 +15,7 @@ define void @test_float(float* %a , floa
 ; CHECK_SMALL64-NEXT:    retq
 ;
 ; CHECK_LARGE64-LABEL: test_float:
-; CHECK_LARGE64:       # BB#0: # %entry
+; CHECK_LARGE64:       # %bb.0: # %entry
 ; CHECK_LARGE64-NEXT:    movabsq $.LCPI0_0, %rax
 ; CHECK_LARGE64-NEXT:    addss (%rax), %xmm0
 ; CHECK_LARGE64-NEXT:    movd %xmm0, %eax
@@ -23,7 +23,7 @@ define void @test_float(float* %a , floa
 ; CHECK_LARGE64-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_float:
-; CHECK32:       # BB#0: # %entry
+; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl 4(%esp), %eax
 ; CHECK32-NEXT:    movl 8(%esp), %ecx
 ; CHECK32-NEXT:    movss .LCPI0_0, %xmm0 # xmm0 = mem[0],zero,zero,zero

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
 define float @test_fdiv_float(float %arg1, float %arg2) {
 ; ALL-LABEL: test_fdiv_float:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    divss %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fdiv float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fdiv_float(float %arg
 
 define double @test_fdiv_double(double %arg1, double %arg2) {
 ; ALL-LABEL: test_fdiv_double:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    divsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fdiv double %arg1, %arg2

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/fmul-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/fmul-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/fmul-scalar.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
 define float @test_fmul_float(float %arg1, float %arg2) {
 ; ALL-LABEL: test_fmul_float:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    mulss %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fmul float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fmul_float(float %arg
 
 define double @test_fmul_double(double %arg1, double %arg2) {
 ; ALL-LABEL: test_fmul_double:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    mulsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fmul double %arg1, %arg2

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/fpext-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/fpext-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/fpext-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/fpext-scalar.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define double @test(float %a) {
 ; CHECK-LABEL: test:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cvtss2sd %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/frameIndex.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/frameIndex.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/frameIndex.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/frameIndex.ll Mon Dec  4 09:18:51 2017
@@ -8,12 +8,12 @@
 
 define i32* @allocai32() {
 ; X64-LABEL: allocai32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq -4(%rsp), %rax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: allocai32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    .cfi_def_cfa_offset 8
 ; X32-NEXT:    movl %esp, %eax
@@ -21,7 +21,7 @@ define i32* @allocai32() {
 ; X32-NEXT:    retl
 ;
 ; X32ABI-LABEL: allocai32:
-; X32ABI:       # BB#0:
+; X32ABI:       # %bb.0:
 ; X32ABI-NEXT:    leal -4(%rsp), %eax
 ; X32ABI-NEXT:    retq
   %ptr1 = alloca i32

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/fsub-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/fsub-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/fsub-scalar.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
 define float @test_fsub_float(float %arg1, float %arg2) {
 ; ALL-LABEL: test_fsub_float:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    subss %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fsub float %arg1, %arg2
@@ -11,7 +11,7 @@ define float @test_fsub_float(float %arg
 
 define double @test_fsub_double(double %arg1, double %arg2) {
 ; ALL-LABEL: test_fsub_double:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    subsd %xmm1, %xmm0
 ; ALL-NEXT:    retq
   %ret = fsub double %arg1, %arg2

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
 ; X64_GISEL-LABEL: test_gep_i8:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $4, %rax
 ; X64_GISEL-NEXT:    movsbq %sil, %rcx
 ; X64_GISEL-NEXT:    imulq %rax, %rcx
@@ -12,7 +12,7 @@ define i32* @test_gep_i8(i32 *%arr, i8 %
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    movsbq %sil, %rax
 ; X64-NEXT:    leaq (%rdi,%rax,4), %rax
@@ -23,13 +23,13 @@ define i32* @test_gep_i8(i32 *%arr, i8 %
 
 define i32* @test_gep_i8_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i8_const:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $80, %rax
 ; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i8_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq 80(%rdi), %rax
 ; X64-NEXT:    retq
   %arrayidx = getelementptr i32, i32* %arr, i8 20
@@ -38,7 +38,7 @@ define i32* @test_gep_i8_const(i32 *%arr
 
 define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
 ; X64_GISEL-LABEL: test_gep_i16:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $4, %rax
 ; X64_GISEL-NEXT:    movswq %si, %rcx
 ; X64_GISEL-NEXT:    imulq %rax, %rcx
@@ -46,7 +46,7 @@ define i32* @test_gep_i16(i32 *%arr, i16
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-NEXT:    movswq %si, %rax
 ; X64-NEXT:    leaq (%rdi,%rax,4), %rax
@@ -57,13 +57,13 @@ define i32* @test_gep_i16(i32 *%arr, i16
 
 define i32* @test_gep_i16_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i16_const:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $80, %rax
 ; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i16_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq 80(%rdi), %rax
 ; X64-NEXT:    retq
   %arrayidx = getelementptr i32, i32* %arr, i16 20
@@ -72,7 +72,7 @@ define i32* @test_gep_i16_const(i32 *%ar
 
 define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
 ; X64_GISEL-LABEL: test_gep_i32:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $4, %rax
 ; X64_GISEL-NEXT:    movslq %esi, %rcx
 ; X64_GISEL-NEXT:    imulq %rax, %rcx
@@ -80,7 +80,7 @@ define i32* @test_gep_i32(i32 *%arr, i32
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movslq %esi, %rax
 ; X64-NEXT:    leaq (%rdi,%rax,4), %rax
 ; X64-NEXT:    retq
@@ -90,13 +90,13 @@ define i32* @test_gep_i32(i32 *%arr, i32
 
 define i32* @test_gep_i32_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i32_const:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $20, %rax
 ; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i32_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq 20(%rdi), %rax
 ; X64-NEXT:    retq
   %arrayidx = getelementptr i32, i32* %arr, i32 5
@@ -105,14 +105,14 @@ define i32* @test_gep_i32_const(i32 *%ar
 
 define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
 ; X64_GISEL-LABEL: test_gep_i64:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $4, %rax
 ; X64_GISEL-NEXT:    imulq %rsi, %rax
 ; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq (%rdi,%rsi,4), %rax
 ; X64-NEXT:    retq
   %arrayidx = getelementptr i32, i32* %arr, i64 %ind
@@ -121,13 +121,13 @@ define i32* @test_gep_i64(i32 *%arr, i64
 
 define i32* @test_gep_i64_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i64_const:
-; X64_GISEL:       # BB#0:
+; X64_GISEL:       # %bb.0:
 ; X64_GISEL-NEXT:    movq $20, %rax
 ; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i64_const:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    leaq 20(%rdi), %rax
 ; X64-NEXT:    retq
   %arrayidx = getelementptr i32, i32* %arr, i64 5

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir Mon Dec  4 09:18:51 2017
@@ -27,25 +27,25 @@ registers:
   - { id: 2, class: _, preferred-register: '' }
   - { id: 3, class: _, preferred-register: '' }
 # ALL:         %1:_(s1) = G_TRUNC %0(s32)
-# ALL-NEXT:    G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+.if.then]]
-# ALL-NEXT:    G_BR %[[FALSE:bb.[0-9]+.if.else]]
-# ALL:       [[TRUE]]:
+# ALL-NEXT:    G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+]]
+# ALL-NEXT:    G_BR %[[FALSE:bb.[0-9]+]]
+# ALL:       [[TRUE]].{{[a-zA-Z0-9.]+}}:
 # ALL-NEXT:    %eax = COPY %2(s32)
 # ALL-NEXT:    RET 0, implicit %eax
-# ALL:       [[FALSE]]:
+# ALL:       [[FALSE]].{{[a-zA-Z0-9.]+}}:
 # ALL-NEXT:    %eax = COPY %3(s32)
 # ALL-NEXT:    RET 0, implicit %eax
 body:             |
   bb.1.entry:
-    successors: %bb.2.if.then(0x40000000), %bb.3.if.else(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi
 
     %0(s32) = COPY %edi
     %2(s32) = G_CONSTANT i32 0
     %3(s32) = G_CONSTANT i32 1
     %1(s1) = G_TRUNC %0(s32)
-    G_BRCOND %1(s1), %bb.2.if.then
-    G_BR %bb.3.if.else
+    G_BRCOND %1(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.if.then:
     %eax = COPY %2(s32)

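On the MIR side, the change is to references: a use of a block now prints as
plain %bb.N, dropping the ".if.then"/".if.else" suffix, while the block
definition keeps its IR name ("bb.2.if.then:"). Check lines that still want to
pin the name down match it with a regex instead, e.g.
"[[TRUE]].{{[a-zA-Z0-9.]+}}:". A hand-written fragment in the new syntax
(register numbers and branch weights are illustrative):

    bb.0.entry:
      successors: %bb.1(0x40000000), %bb.2(0x40000000)
      liveins: %edi

      %0:_(s32) = COPY %edi
      %1:_(s1) = G_TRUNC %0(s32)
      G_BRCOND %1(s1), %bb.1
      G_BR %bb.2

    bb.1.if.then:
      RET 0
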
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir Mon Dec  4 09:18:51 2017
@@ -140,29 +140,29 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_i1
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %edx, %esi
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s1) = COPY %esi
   ; ALL:   [[COPY2:%[0-9]+]]:_(s1) = COPY %edx
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL:   [[ANYEXT:%[0-9]+]]:_(s8) = G_ANYEXT [[COPY1]](s1)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL:   [[ANYEXT1:%[0-9]+]]:_(s8) = G_ANYEXT [[COPY2]](s1)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s8) = G_PHI [[ANYEXT]](s8), %bb.1.cond.true, [[ANYEXT1]](s8), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s8) = G_PHI [[ANYEXT]](s8), %bb.1, [[ANYEXT1]](s8), %bb.2
   ; ALL:   %al = COPY
   ; ALL:   RET 0, implicit %al
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -170,20 +170,20 @@ body:             |
     %2(s1) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s1) = G_PHI %1(s1), %bb.2.cond.true, %2(s1), %bb.3.cond.false
+    %5(s1) = G_PHI %1(s1), %bb.2, %2(s1), %bb.3
     %6(s8) = G_ZEXT %5(s1)
     %al = COPY %6(s8)
     RET 0, implicit %al
@@ -211,27 +211,27 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_i8
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %edx, %esi
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s8) = COPY %sil
   ; ALL:   [[COPY2:%[0-9]+]]:_(s8) = COPY %edx
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   successors: %bb.3(0x80000000)
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s8) = G_PHI [[COPY1]](s8), %bb.1.cond.true, [[COPY2]](s8), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s8) = G_PHI [[COPY1]](s8), %bb.1, [[COPY2]](s8), %bb.2
   ; ALL:   %al = COPY [[PHI]](s8)
   ; ALL:   RET 0, implicit %al
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -239,20 +239,20 @@ body:             |
     %2(s8) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s8) = G_PHI %1(s8), %bb.2.cond.true, %2(s8), %bb.3.cond.false
+    %5(s8) = G_PHI %1(s8), %bb.2, %2(s8), %bb.3
     %al = COPY %5(s8)
     RET 0, implicit %al
 
@@ -279,27 +279,27 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_i16
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %edx, %esi
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s16) = COPY %si
   ; ALL:   [[COPY2:%[0-9]+]]:_(s16) = COPY %edx
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   successors: %bb.3(0x80000000)
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[COPY1]](s16), %bb.1.cond.true, [[COPY2]](s16), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[COPY1]](s16), %bb.1, [[COPY2]](s16), %bb.2
   ; ALL:   %ax = COPY [[PHI]](s16)
   ; ALL:   RET 0, implicit %ax
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -307,20 +307,20 @@ body:             |
     %2(s16) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s16) = G_PHI %1(s16), %bb.2.cond.true, %2(s16), %bb.3.cond.false
+    %5(s16) = G_PHI %1(s16), %bb.2, %2(s16), %bb.3
     %ax = COPY %5(s16)
     RET 0, implicit %ax
 
@@ -347,27 +347,27 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_i32
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %edx, %esi
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
   ; ALL:   [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   successors: %bb.3(0x80000000)
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1.cond.true, [[COPY2]](s32), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
   ; ALL:   %eax = COPY [[PHI]](s32)
   ; ALL:   RET 0, implicit %eax
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -375,20 +375,20 @@ body:             |
     %2(s32) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+    %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
     %eax = COPY %5(s32)
     RET 0, implicit %eax
 
@@ -415,27 +415,27 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_i64
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %rdx, %rsi
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
   ; ALL:   [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   successors: %bb.3(0x80000000)
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1.cond.true, [[COPY2]](s64), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
   ; ALL:   %rax = COPY [[PHI]](s64)
   ; ALL:   RET 0, implicit %rax
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %rdx, %rsi
 
     %0(s32) = COPY %edi
@@ -443,20 +443,20 @@ body:             |
     %2(s64) = COPY %rdx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+    %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
     %rax = COPY %5(s64)
     RET 0, implicit %rax
 
@@ -483,27 +483,27 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_float
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %xmm0, %xmm1
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm0
   ; ALL:   [[COPY2:%[0-9]+]]:_(s32) = COPY %xmm1
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   successors: %bb.3(0x80000000)
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1.cond.true, [[COPY2]](s32), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
   ; ALL:   %xmm0 = COPY [[PHI]](s32)
   ; ALL:   RET 0, implicit %xmm0
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %xmm0, %xmm1
 
     %0(s32) = COPY %edi
@@ -511,20 +511,20 @@ body:             |
     %2(s32) = COPY %xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+    %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
     %xmm0 = COPY %5(s32)
     RET 0, implicit %xmm0
 
@@ -551,27 +551,27 @@ constants:
 
 body:             |
   ; ALL-LABEL: name: test_double
-  ; ALL: bb.0.entry:
-  ; ALL:   successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+  ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
+  ; ALL:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; ALL:   liveins: %edi, %xmm0, %xmm1
   ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY %edi
   ; ALL:   [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm0
   ; ALL:   [[COPY2:%[0-9]+]]:_(s64) = COPY %xmm1
   ; ALL:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; ALL:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
-  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1.cond.true
-  ; ALL:   G_BR %bb.2.cond.false
+  ; ALL:   G_BRCOND [[ICMP]](s1), %bb.1
+  ; ALL:   G_BR %bb.2
   ; ALL: bb.1.cond.true:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
-  ; ALL:   G_BR %bb.3.cond.end
+  ; ALL:   successors: %bb.3(0x80000000)
+  ; ALL:   G_BR %bb.3
   ; ALL: bb.2.cond.false:
-  ; ALL:   successors: %bb.3.cond.end(0x80000000)
+  ; ALL:   successors: %bb.3(0x80000000)
   ; ALL: bb.3.cond.end:
-  ; ALL:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1.cond.true, [[COPY2]](s64), %bb.2.cond.false
+  ; ALL:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
   ; ALL:   %xmm0 = COPY [[PHI]](s64)
   ; ALL:   RET 0, implicit %xmm0
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %xmm0, %xmm1
 
     %0(s32) = COPY %edi
@@ -579,20 +579,20 @@ body:             |
     %2(s64) = COPY %xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+    %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
     %xmm0 = COPY %5(s64)
     RET 0, implicit %xmm0
 

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i1 @test_load_i1(i1 * %p1) {
 ; ALL-LABEL: test_load_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movb (%eax), %al
 ; ALL-NEXT:    retl
@@ -16,7 +16,7 @@ define i1 @test_load_i1(i1 * %p1) {
 
 define i8 @test_load_i8(i8 * %p1) {
 ; ALL-LABEL: test_load_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movb (%eax), %al
 ; ALL-NEXT:    retl
@@ -26,7 +26,7 @@ define i8 @test_load_i8(i8 * %p1) {
 
 define i16 @test_load_i16(i16 * %p1) {
 ; ALL-LABEL: test_load_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movzwl (%eax), %eax
 ; ALL-NEXT:    retl
@@ -36,7 +36,7 @@ define i16 @test_load_i16(i16 * %p1) {
 
 define i32 @test_load_i32(i32 * %p1) {
 ; ALL-LABEL: test_load_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movl (%eax), %eax
 ; ALL-NEXT:    retl
@@ -46,7 +46,7 @@ define i32 @test_load_i32(i32 * %p1) {
 
 define i1 * @test_store_i1(i1 %val, i1 * %p1) {
 ; ALL-LABEL: test_store_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movb 4(%esp), %cl
 ; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    andb $1, %cl
@@ -58,7 +58,7 @@ define i1 * @test_store_i1(i1 %val, i1 *
 
 define i8 * @test_store_i8(i8 %val, i8 * %p1) {
 ; ALL-LABEL: test_store_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movb 4(%esp), %cl
 ; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    movb %cl, (%eax)
@@ -69,7 +69,7 @@ define i8 * @test_store_i8(i8 %val, i8 *
 
 define i16 * @test_store_i16(i16 %val, i16 * %p1) {
 ; ALL-LABEL: test_store_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movzwl 4(%esp), %ecx
 ; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    movw %cx, (%eax)
@@ -80,7 +80,7 @@ define i16 * @test_store_i16(i16 %val, i
 
 define i32 * @test_store_i32(i32 %val, i32 * %p1) {
 ; ALL-LABEL: test_store_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %ecx
 ; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    movl %ecx, (%eax)
@@ -91,7 +91,7 @@ define i32 * @test_store_i32(i32 %val, i
 
 define i32* @test_load_ptr(i32** %ptr1) {
 ; ALL-LABEL: test_load_ptr:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movl (%eax), %eax
 ; ALL-NEXT:    retl
@@ -101,7 +101,7 @@ define i32* @test_load_ptr(i32** %ptr1)
 
 define void @test_store_ptr(i32** %ptr1, i32* %a) {
 ; ALL-LABEL: test_store_ptr:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movl 8(%esp), %ecx
 ; ALL-NEXT:    movl %ecx, (%eax)

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define i1 @test_load_i1(i1 * %p1) {
 ; ALL-LABEL: test_load_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movb (%rdi), %al
 ; ALL-NEXT:    retq
   %r = load i1, i1* %p1
@@ -13,7 +13,7 @@ define i1 @test_load_i1(i1 * %p1) {
 
 define i8 @test_load_i8(i8 * %p1) {
 ; ALL-LABEL: test_load_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movb (%rdi), %al
 ; ALL-NEXT:    retq
   %r = load i8, i8* %p1
@@ -22,7 +22,7 @@ define i8 @test_load_i8(i8 * %p1) {
 
 define i16 @test_load_i16(i16 * %p1) {
 ; ALL-LABEL: test_load_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movzwl (%rdi), %eax
 ; ALL-NEXT:    retq
   %r = load i16, i16* %p1
@@ -31,7 +31,7 @@ define i16 @test_load_i16(i16 * %p1) {
 
 define i32 @test_load_i32(i32 * %p1) {
 ; ALL-LABEL: test_load_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl (%rdi), %eax
 ; ALL-NEXT:    retq
   %r = load i32, i32* %p1
@@ -40,7 +40,7 @@ define i32 @test_load_i32(i32 * %p1) {
 
 define i64 @test_load_i64(i64 * %p1) {
 ; ALL-LABEL: test_load_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq (%rdi), %rax
 ; ALL-NEXT:    retq
   %r = load i64, i64* %p1
@@ -49,13 +49,13 @@ define i64 @test_load_i64(i64 * %p1) {
 
 define float @test_load_float(float * %p1) {
 ; SSE-LABEL: test_load_float:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movl (%rdi), %eax
 ; SSE-NEXT:    movd %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL-LABEL: test_load_float:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl (%rdi), %eax
 ; ALL-NEXT:    movd %eax, %xmm0
 ; ALL-NEXT:    retq
@@ -65,13 +65,13 @@ define float @test_load_float(float * %p
 
 define double @test_load_double(double * %p1) {
 ; SSE-LABEL: test_load_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    movq (%rdi), %rax
 ; SSE-NEXT:    movq %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; ALL-LABEL: test_load_double:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq (%rdi), %rax
 ; ALL-NEXT:    movq %rax, %xmm0
 ; ALL-NEXT:    retq
@@ -81,7 +81,7 @@ define double @test_load_double(double *
 
 define i1 * @test_store_i1(i1 %val, i1 * %p1) {
 ; ALL-LABEL: test_store_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    andb $1, %dil
 ; ALL-NEXT:    movb %dil, (%rsi)
 ; ALL-NEXT:    movq %rsi, %rax
@@ -92,7 +92,7 @@ define i1 * @test_store_i1(i1 %val, i1 *
 
 define i32 * @test_store_i32(i32 %val, i32 * %p1) {
 ; ALL-LABEL: test_store_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl %edi, (%rsi)
 ; ALL-NEXT:    movq %rsi, %rax
 ; ALL-NEXT:    retq
@@ -102,7 +102,7 @@ define i32 * @test_store_i32(i32 %val, i
 
 define i64 * @test_store_i64(i64 %val, i64 * %p1) {
 ; ALL-LABEL: test_store_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq %rdi, (%rsi)
 ; ALL-NEXT:    movq %rsi, %rax
 ; ALL-NEXT:    retq
@@ -113,14 +113,14 @@ define i64 * @test_store_i64(i64 %val, i
 define float * @test_store_float(float %val, float * %p1) {
 ;
 ; SSE_FAST-LABEL: test_store_float:
-; SSE_FAST:       # BB#0:
+; SSE_FAST:       # %bb.0:
 ; SSE_FAST-NEXT:    movd %xmm0, %eax
 ; SSE_FAST-NEXT:    movl %eax, (%rdi)
 ; SSE_FAST-NEXT:    movq %rdi, %rax
 ; SSE_FAST-NEXT:    retq
 ;
 ; SSE_GREEDY-LABEL: test_store_float:
-; SSE_GREEDY:       # BB#0:
+; SSE_GREEDY:       # %bb.0:
 ; SSE_GREEDY-NEXT:    movss %xmm0, (%rdi)
 ; SSE_GREEDY-NEXT:    movq %rdi, %rax
 ; SSE_GREEDY-NEXT:    retq
@@ -131,14 +131,14 @@ define float * @test_store_float(float %
 define double * @test_store_double(double %val, double * %p1) {
 ;
 ; SSE_FAST-LABEL: test_store_double:
-; SSE_FAST:       # BB#0:
+; SSE_FAST:       # %bb.0:
 ; SSE_FAST-NEXT:    movq %xmm0, %rax
 ; SSE_FAST-NEXT:    movq %rax, (%rdi)
 ; SSE_FAST-NEXT:    movq %rdi, %rax
 ; SSE_FAST-NEXT:    retq
 ;
 ; SSE_GREEDY-LABEL: test_store_double:
-; SSE_GREEDY:       # BB#0:
+; SSE_GREEDY:       # %bb.0:
 ; SSE_GREEDY-NEXT:    movsd %xmm0, (%rdi)
 ; SSE_GREEDY-NEXT:    movq %rdi, %rax
 ; SSE_GREEDY-NEXT:    retq
@@ -148,7 +148,7 @@ define double * @test_store_double(doubl
 
 define i32* @test_load_ptr(i32** %ptr1) {
 ; ALL-LABEL: test_load_ptr:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq (%rdi), %rax
 ; ALL-NEXT:    retq
   %p = load i32*, i32** %ptr1
@@ -157,7 +157,7 @@ define i32* @test_load_ptr(i32** %ptr1)
 
 define void @test_store_ptr(i32** %ptr1, i32* %a) {
 ; ALL-LABEL: test_store_ptr:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movq %rsi, (%rdi)
 ; ALL-NEXT:    retq
   store i32* %a, i32** %ptr1
@@ -166,7 +166,7 @@ define void @test_store_ptr(i32** %ptr1,
 
 define i32 @test_gep_folding(i32* %arr, i32 %val) {
 ; ALL-LABEL: test_gep_folding:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movl %esi, 20(%rdi)
 ; ALL-NEXT:    movl 20(%rdi), %eax
 ; ALL-NEXT:    retq
@@ -179,7 +179,7 @@ define i32 @test_gep_folding(i32* %arr,
 ; check that gep index doesn't folded into memory operand
 define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
 ; ALL-LABEL: test_gep_folding_largeGepIndex:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    movabsq $228719476720, %rax # imm = 0x3540BE3FF0
 ; ALL-NEXT:    leaq (%rdi,%rax), %rax
 ; ALL-NEXT:    movl %esi, (%rax)

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/memop-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/memop-vec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/memop-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/memop-vec.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
 ; SKX-LABEL: test_load_v4i32_noalign:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %xmm0
 ; SKX-NEXT:    retq
   %r = load <4 x i32>, <4 x i32>* %p1, align 1
@@ -13,7 +13,7 @@ define <4 x i32> @test_load_v4i32_noalig
 
 define <4 x i32> @test_load_v4i32_align(<4 x i32> * %p1) {
 ; SKX-LABEL: test_load_v4i32_align:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %xmm0
 ; SKX-NEXT:    retq
   %r = load <4 x i32>, <4 x i32>* %p1, align 16
@@ -22,7 +22,7 @@ define <4 x i32> @test_load_v4i32_align(
 
 define <8 x i32> @test_load_v8i32_noalign(<8 x i32> * %p1) {
 ; SKX-LABEL: test_load_v8i32_noalign:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %ymm0
 ; SKX-NEXT:    retq
   %r = load <8 x i32>, <8 x i32>* %p1, align 1
@@ -31,7 +31,7 @@ define <8 x i32> @test_load_v8i32_noalig
 
 define <8 x i32> @test_load_v8i32_align(<8 x i32> * %p1) {
 ; SKX-LABEL: test_load_v8i32_align:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %ymm0
 ; SKX-NEXT:    retq
   %r = load <8 x i32>, <8 x i32>* %p1, align 32
@@ -40,7 +40,7 @@ define <8 x i32> @test_load_v8i32_align(
 
 define <16 x i32> @test_load_v16i32_noalign(<16 x i32> * %p1) {
 ; SKX-LABEL: test_load_v16i32_noalign:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm0
 ; SKX-NEXT:    retq
   %r = load <16 x i32>, <16 x i32>* %p1, align 1
@@ -49,7 +49,7 @@ define <16 x i32> @test_load_v16i32_noal
 
 define <16 x i32> @test_load_v16i32_align(<16 x i32> * %p1) {
 ; SKX-LABEL: test_load_v16i32_align:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %zmm0
 ; SKX-NEXT:    retq
   %r = load <16 x i32>, <16 x i32>* %p1, align 32
@@ -58,7 +58,7 @@ define <16 x i32> @test_load_v16i32_alig
 
 define void @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
 ; SKX-LABEL: test_store_v4i32_noalign:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %xmm0, (%rdi)
 ; SKX-NEXT:    retq
   store <4 x i32> %val, <4 x i32>* %p1, align 1
@@ -67,7 +67,7 @@ define void @test_store_v4i32_noalign(<4
 
 define void @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
 ; SKX-LABEL: test_store_v4i32_align:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %xmm0, (%rdi)
 ; SKX-NEXT:    retq
   store <4 x i32> %val, <4 x i32>* %p1, align 16
@@ -76,7 +76,7 @@ define void @test_store_v4i32_align(<4 x
 
 define void @test_store_v8i32_noalign(<8 x i32> %val, <8 x i32>* %p1) {
 ; SKX-LABEL: test_store_v8i32_noalign:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %ymm0, (%rdi)
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq
@@ -86,7 +86,7 @@ define void @test_store_v8i32_noalign(<8
 
 define void @test_store_v8i32_align(<8 x i32> %val, <8 x i32>* %p1) {
 ; SKX-LABEL: test_store_v8i32_align:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %ymm0, (%rdi)
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq
@@ -96,7 +96,7 @@ define void @test_store_v8i32_align(<8 x
 
 define void @test_store_v16i32_noalign(<16 x i32> %val, <16 x i32>* %p1) {
 ; SKX-LABEL: test_store_v16i32_noalign:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups %zmm0, (%rdi)
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq
@@ -106,7 +106,7 @@ define void @test_store_v16i32_noalign(<
 
 define void @test_store_v16i32_align(<16 x i32> %val, <16 x i32>* %p1) {
 ; SKX-LABEL: test_store_v16i32_align:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps %zmm0, (%rdi)
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/mul-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/mul-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/mul-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/mul-scalar.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
 ; X64-LABEL: test_mul_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    imulw %di, %si
 ; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    retq
@@ -19,7 +19,7 @@ define i16 @test_mul_i16(i16 %arg1, i16
 
 define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
 ; X64-LABEL: test_mul_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    imull %edi, %esi
 ; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    retq
@@ -29,7 +29,7 @@ define i32 @test_mul_i32(i32 %arg1, i32
 
 define i64 @test_mul_i64(i64 %arg1, i64 %arg2) {
 ; X64-LABEL: test_mul_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    imulq %rdi, %rsi
 ; X64-NEXT:    movq %rsi, %rax
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/mul-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/mul-vec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/mul-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/mul-vec.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; SKX-LABEL: test_mul_v8i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = mul <8 x i16> %arg1, %arg2
@@ -12,7 +12,7 @@ define <8 x i16> @test_mul_v8i16(<8 x i1
 
 define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; SKX-LABEL: test_mul_v4i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = mul <4 x i32> %arg1, %arg2
@@ -21,7 +21,7 @@ define <4 x i32> @test_mul_v4i32(<4 x i3
 
 define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
 ; SKX-LABEL: test_mul_v2i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullq %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = mul <2 x i64> %arg1, %arg2
@@ -30,7 +30,7 @@ define <2 x i64> @test_mul_v2i64(<2 x i6
 
 define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
 ; SKX-LABEL: test_mul_v16i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = mul <16 x i16> %arg1, %arg2
@@ -39,7 +39,7 @@ define <16 x i16> @test_mul_v16i16(<16 x
 
 define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
 ; SKX-LABEL: test_mul_v8i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = mul <8 x i32> %arg1, %arg2
@@ -48,7 +48,7 @@ define <8 x i32> @test_mul_v8i32(<8 x i3
 
 define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
 ; SKX-LABEL: test_mul_v4i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullq %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = mul <4 x i64> %arg1, %arg2
@@ -57,7 +57,7 @@ define <4 x i64> @test_mul_v4i64(<4 x i6
 
 define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
 ; SKX-LABEL: test_mul_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = mul <32 x i16> %arg1, %arg2
@@ -66,7 +66,7 @@ define <32 x i16> @test_mul_v32i16(<32 x
 
 define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
 ; SKX-LABEL: test_mul_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = mul <16 x i32> %arg1, %arg2
@@ -75,7 +75,7 @@ define <16 x i32> @test_mul_v16i32(<16 x
 
 define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
 ; SKX-LABEL: test_mul_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = mul <8 x i64> %arg1, %arg2

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/or-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/or-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/or-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/or-scalar.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test_or_i1(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_or_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    orb %al, %al
@@ -18,7 +18,7 @@ define i32 @test_or_i1(i32 %arg1, i32 %a
 
 define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
 ; ALL-LABEL: test_or_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    orb %dil, %sil
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -28,7 +28,7 @@ define i8 @test_or_i8(i8 %arg1, i8 %arg2
 
 define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
 ; ALL-LABEL: test_or_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    orw %di, %si
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -38,7 +38,7 @@ define i16 @test_or_i16(i16 %arg1, i16 %
 
 define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_or_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    orl %edi, %esi
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -48,7 +48,7 @@ define i32 @test_or_i32(i32 %arg1, i32 %
 
 define i64 @test_or_i64(i64 %arg1, i64 %arg2) {
 ; ALL-LABEL: test_or_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    orq %rdi, %rsi
 ; ALL-NEXT:    movq %rsi, %rax
 ; ALL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/phi.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/phi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/phi.ll Mon Dec  4 09:18:51 2017
@@ -3,13 +3,13 @@
 
 define i8 @test_i8(i32 %a, i8 %f, i8 %t) {
 ; ALL-LABEL: test_i8:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    cmpl %eax, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    testb $1, %al
 ; ALL-NEXT:    jne .LBB0_2
-; ALL-NEXT:  # BB#1: # %cond.false
+; ALL-NEXT:  # %bb.1: # %cond.false
 ; ALL-NEXT:    movl %edx, %esi
 ; ALL-NEXT:  .LBB0_2: # %cond.end
 ; ALL-NEXT:    movl %esi, %eax
@@ -31,13 +31,13 @@ cond.end:
 
 define i16 @test_i16(i32 %a, i16 %f, i16 %t) {
 ; ALL-LABEL: test_i16:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    cmpl %eax, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    testb $1, %al
 ; ALL-NEXT:    jne .LBB1_2
-; ALL-NEXT:  # BB#1: # %cond.false
+; ALL-NEXT:  # %bb.1: # %cond.false
 ; ALL-NEXT:    movl %edx, %esi
 ; ALL-NEXT:  .LBB1_2: # %cond.end
 ; ALL-NEXT:    movl %esi, %eax
@@ -59,13 +59,13 @@ cond.end:
 
 define i32 @test_i32(i32 %a, i32 %f, i32 %t) {
 ; ALL-LABEL: test_i32:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    cmpl %eax, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    testb $1, %al
 ; ALL-NEXT:    jne .LBB2_2
-; ALL-NEXT:  # BB#1: # %cond.false
+; ALL-NEXT:  # %bb.1: # %cond.false
 ; ALL-NEXT:    movl %edx, %esi
 ; ALL-NEXT:  .LBB2_2: # %cond.end
 ; ALL-NEXT:    movl %esi, %eax
@@ -87,13 +87,13 @@ cond.end:
 
 define i64 @test_i64(i32 %a, i64 %f, i64 %t) {
 ; ALL-LABEL: test_i64:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    cmpl %eax, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    testb $1, %al
 ; ALL-NEXT:    jne .LBB3_2
-; ALL-NEXT:  # BB#1: # %cond.false
+; ALL-NEXT:  # %bb.1: # %cond.false
 ; ALL-NEXT:    movq %rdx, %rsi
 ; ALL-NEXT:  .LBB3_2: # %cond.end
 ; ALL-NEXT:    movq %rsi, %rax
@@ -115,13 +115,13 @@ cond.end:
 
 define float @test_float(i32 %a, float %f, float %t) {
 ; ALL-LABEL: test_float:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    cmpl %eax, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    testb $1, %al
 ; ALL-NEXT:    jne .LBB4_2
-; ALL-NEXT:  # BB#1: # %cond.false
+; ALL-NEXT:  # %bb.1: # %cond.false
 ; ALL-NEXT:    movaps %xmm1, %xmm0
 ; ALL-NEXT:  .LBB4_2: # %cond.end
 ; ALL-NEXT:    retq
@@ -142,13 +142,13 @@ cond.end:
 
 define double @test_double(i32 %a, double %f, double %t) {
 ; ALL-LABEL: test_double:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    xorl %eax, %eax
 ; ALL-NEXT:    cmpl %eax, %edi
 ; ALL-NEXT:    setg %al
 ; ALL-NEXT:    testb $1, %al
 ; ALL-NEXT:    jne .LBB5_2
-; ALL-NEXT:  # BB#1: # %cond.false
+; ALL-NEXT:  # %bb.1: # %cond.false
 ; ALL-NEXT:    movaps %xmm1, %xmm0
 ; ALL-NEXT:  .LBB5_2: # %cond.end
 ; ALL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir Mon Dec  4 09:18:51 2017
@@ -1311,12 +1311,12 @@ registers:
   - { id: 4, class: _, preferred-register: '' }
   - { id: 5, class: _, preferred-register: '' }
 # CHECK:       bb.3.cond.end:
-# CHECK-NEXT:      %5:gpr(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+# CHECK-NEXT:      %5:gpr(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
 # CHECK-NEXT:      %eax = COPY %5(s32)
 # CHECK-NEXT:      RET 0, implicit %eax
 body:             |
   bb.0.entry:
-    successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+    successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -1324,19 +1324,19 @@ body:             |
     %2(s32) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.1.cond.true
-    G_BR %bb.2.cond.false
+    G_BRCOND %4(s1), %bb.1
+    G_BR %bb.2
 
   bb.1.cond.true:
-    successors: %bb.3.cond.end(0x80000000)
+    successors: %bb.3(0x80000000)
 
-    G_BR %bb.3.cond.end
+    G_BR %bb.3
 
   bb.2.cond.false:
-    successors: %bb.3.cond.end(0x80000000)
+    successors: %bb.3(0x80000000)
 
   bb.3.cond.end:
-    %5(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+    %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
     %eax = COPY %5(s32)
     RET 0, implicit %eax
 
@@ -1363,12 +1363,12 @@ registers:
   - { id: 4, class: _, preferred-register: '' }
   - { id: 5, class: _, preferred-register: '' }
 # CHECK:         bb.3.cond.end:
-# CHECK-NEXT:      %5:vecr(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+# CHECK-NEXT:      %5:vecr(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
 # CHECK-NEXT:      %xmm0 = COPY %5(s32)
 # CHECK-NEXT:      RET 0, implicit %xmm0
 body:             |
   bb.0.entry:
-    successors: %bb.1.cond.true(0x40000000), %bb.2.cond.false(0x40000000)
+    successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: %edi, %xmm0, %xmm1
 
     %0(s32) = COPY %edi
@@ -1376,19 +1376,19 @@ body:             |
     %2(s32) = COPY %xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.1.cond.true
-    G_BR %bb.2.cond.false
+    G_BRCOND %4(s1), %bb.1
+    G_BR %bb.2
 
   bb.1.cond.true:
-    successors: %bb.3.cond.end(0x80000000)
+    successors: %bb.3(0x80000000)
 
-    G_BR %bb.3.cond.end
+    G_BR %bb.3
 
   bb.2.cond.false:
-    successors: %bb.3.cond.end(0x80000000)
+    successors: %bb.3(0x80000000)
 
   bb.3.cond.end:
-    %5(s32) = G_PHI %1(s32), %bb.1.cond.true, %2(s32), %bb.2.cond.false
+    %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
     %xmm0 = COPY %5(s32)
     RET 0, implicit %xmm0
 

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-br.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-br.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-br.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-br.mir Mon Dec  4 09:18:51 2017
@@ -20,20 +20,20 @@ name:            uncondbr
 alignment:       4
 legalized:       true
 regBankSelected: true
-# CHECK:     JMP_1 %bb.2.bb2
-# CHECK:     JMP_1 %bb.1.end
+# CHECK:     JMP_1 %bb.2
+# CHECK:     JMP_1 %bb.1
 body:             |
   bb.1.entry:
-    successors: %bb.3.bb2(0x80000000)
+    successors: %bb.3(0x80000000)
 
-    G_BR %bb.3.bb2
+    G_BR %bb.3
 
   bb.2.end:
     RET 0
 
   bb.3.bb2:
-    successors: %bb.2.end(0x80000000)
+    successors: %bb.2(0x80000000)
 
-    G_BR %bb.2.end
+    G_BR %bb.2
 
 ...

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir Mon Dec  4 09:18:51 2017
@@ -33,27 +33,27 @@ registers:
 # CHECK-NEXT:    %3:gr32 = MOV32ri 1
 # CHECK-NEXT:    %1:gr8 = COPY %0.sub_8bit
 # CHECK-NEXT:    TEST8ri %1, 1, implicit-def %eflags
-# CHECK-NEXT:    JNE_1 %[[TRUE:bb.[0-9].true]], implicit %eflags
-# CHECK-NEXT:    JMP_1 %[[FALSE:bb.[0-9].false]]
-# CHECK:      [[TRUE]]:
+# CHECK-NEXT:    JNE_1 %[[TRUE:bb.[0-9]+]], implicit %eflags
+# CHECK-NEXT:    JMP_1 %[[FALSE:bb.[0-9]+]]
+# CHECK:      [[TRUE]].{{[a-zA-Z0-9]+}}:
 # CHECK-NEXT:    %eax = COPY %2
 # CHECK-NEXT:    RET 0, implicit %eax
-# CHECK:      [[FALSE]]:
+# CHECK:      [[FALSE]].{{[a-zA-Z0-9]+}}:
 # CHECK-NEXT:    %eax = COPY %3
 # CHECK-NEXT:    RET 0, implicit %eax
 
 
 body:             |
   bb.1.entry:
-    successors: %bb.2.true(0x40000000), %bb.3.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi
 
     %0(s32) = COPY %edi
     %2(s32) = G_CONSTANT i32 0
     %3(s32) = G_CONSTANT i32 1
     %1(s1) = G_TRUNC %0(s32)
-    G_BRCOND %1(s1), %bb.2.true
-    G_BR %bb.3.false
+    G_BRCOND %1(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.true:
     %eax = COPY %2(s32)

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir Mon Dec  4 09:18:51 2017
@@ -121,12 +121,12 @@ registers:
   - { id: 4, class: gpr, preferred-register: '' }
   - { id: 5, class: gpr, preferred-register: '' }
 # ALL-LABEL: bb.3.cond.end:
-# ALL:          %5:gr8 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL:          %5:gr8 = PHI %1, %bb.1, %2, %bb.2
 # ALL-NEXT:     %al = COPY %5
 # ALL-NEXT:     RET 0, implicit %al
 body:             |
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -134,20 +134,20 @@ body:             |
     %2(s8) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s8) = G_PHI %1(s8), %bb.2.cond.true, %2(s8), %bb.3.cond.false
+    %5(s8) = G_PHI %1(s8), %bb.2, %2(s8), %bb.3
     %al = COPY %5(s8)
     RET 0, implicit %al
 
@@ -174,12 +174,12 @@ registers:
   - { id: 4, class: gpr, preferred-register: '' }
   - { id: 5, class: gpr, preferred-register: '' }
 # ALL-LABEL: bb.3.cond.end:
-# ALL:          %5:gr16 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL:          %5:gr16 = PHI %1, %bb.1, %2, %bb.2
 # ALL-NEXT:     %ax = COPY %5
 # ALL-NEXT:     RET 0, implicit %ax
 body:             |
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -187,20 +187,20 @@ body:             |
     %2(s16) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s16) = G_PHI %1(s16), %bb.2.cond.true, %2(s16), %bb.3.cond.false
+    %5(s16) = G_PHI %1(s16), %bb.2, %2(s16), %bb.3
     %ax = COPY %5(s16)
     RET 0, implicit %ax
 
@@ -227,12 +227,12 @@ registers:
   - { id: 4, class: gpr, preferred-register: '' }
   - { id: 5, class: gpr, preferred-register: '' }
 # ALL-LABEL: bb.3.cond.end:
-# ALL:          %5:gr32 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL:          %5:gr32 = PHI %1, %bb.1, %2, %bb.2
 # ALL-NEXT:     %eax = COPY %5
 # ALL-NEXT:     RET 0, implicit %eax
 body:             |
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %edx, %esi
 
     %0(s32) = COPY %edi
@@ -240,20 +240,20 @@ body:             |
     %2(s32) = COPY %edx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+    %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
     %eax = COPY %5(s32)
     RET 0, implicit %eax
 
@@ -280,12 +280,12 @@ registers:
   - { id: 4, class: gpr, preferred-register: '' }
   - { id: 5, class: gpr, preferred-register: '' }
 # ALL-LABEL: bb.3.cond.end:
-# ALL:          %5:gr64 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL:          %5:gr64 = PHI %1, %bb.1, %2, %bb.2
 # ALL-NEXT:     %rax = COPY %5
 # ALL-NEXT:     RET 0, implicit %rax
 body:             |
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %rdx, %rsi
 
     %0(s32) = COPY %edi
@@ -293,20 +293,20 @@ body:             |
     %2(s64) = COPY %rdx
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+    %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
     %rax = COPY %5(s64)
     RET 0, implicit %rax
 
@@ -337,12 +337,12 @@ fixedStack:
 stack:
 constants:
 # ALL-LABEL: bb.3.cond.end:
-# ALL:          %5:fr32 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL:          %5:fr32 = PHI %1, %bb.1, %2, %bb.2
 # ALL-NEXT:     %xmm0 = COPY %5
 # ALL-NEXT:     RET 0, implicit %xmm0
 body:             |
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %xmm0, %xmm1
 
     %0(s32) = COPY %edi
@@ -350,20 +350,20 @@ body:             |
     %2(s32) = COPY %xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s32) = G_PHI %1(s32), %bb.2.cond.true, %2(s32), %bb.3.cond.false
+    %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
     %xmm0 = COPY %5(s32)
     RET 0, implicit %xmm0
 
@@ -390,12 +390,12 @@ registers:
   - { id: 4, class: gpr, preferred-register: '' }
   - { id: 5, class: vecr, preferred-register: '' }
 # ALL-LABEL: bb.3.cond.end:
-# ALL:          %5:fr64 = PHI %1, %bb.1.cond.true, %2, %bb.2.cond.false
+# ALL:          %5:fr64 = PHI %1, %bb.1, %2, %bb.2
 # ALL-NEXT:     %xmm0 = COPY %5
 # ALL-NEXT:     RET 0, implicit %xmm0
 body:             |
   bb.1.entry:
-    successors: %bb.2.cond.true(0x40000000), %bb.3.cond.false(0x40000000)
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
     liveins: %edi, %xmm0, %xmm1
 
     %0(s32) = COPY %edi
@@ -403,20 +403,20 @@ body:             |
     %2(s64) = COPY %xmm1
     %3(s32) = G_CONSTANT i32 0
     %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
-    G_BRCOND %4(s1), %bb.2.cond.true
-    G_BR %bb.3.cond.false
+    G_BRCOND %4(s1), %bb.2
+    G_BR %bb.3
 
   bb.2.cond.true:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
-    G_BR %bb.4.cond.end
+    G_BR %bb.4
 
   bb.3.cond.false:
-    successors: %bb.4.cond.end(0x80000000)
+    successors: %bb.4(0x80000000)
 
 
   bb.4.cond.end:
-    %5(s64) = G_PHI %1(s64), %bb.2.cond.true, %2(s64), %bb.3.cond.false
+    %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
     %xmm0 = COPY %5(s64)
     RET 0, implicit %xmm0
 

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/sub-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/sub-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/sub-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/sub-scalar.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
 ; X64-LABEL: test_sub_i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subq %rsi, %rdi
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    retq
@@ -13,7 +13,7 @@ define i64 @test_sub_i64(i64 %arg1, i64
 
 define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
 ; X64-LABEL: test_sub_i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subl %esi, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -23,7 +23,7 @@ define i32 @test_sub_i32(i32 %arg1, i32
 
 define i16 @test_sub_i16(i16 %arg1, i16 %arg2) {
 ; X64-LABEL: test_sub_i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subw %si, %di
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -33,7 +33,7 @@ define i16 @test_sub_i16(i16 %arg1, i16
 
 define i8 @test_sub_i8(i8 %arg1, i8 %arg2) {
 ; X64-LABEL: test_sub_i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subb %sil, %dil
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
@@ -43,7 +43,7 @@ define i8 @test_sub_i8(i8 %arg1, i8 %arg
 
 define i32 @test_sub_i1(i32 %arg1, i32 %arg2) {
 ; X64-LABEL: test_sub_i1:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    subb %sil, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    andl $1, %eax

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/sub-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/sub-vec.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/sub-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/sub-vec.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; SKX-LABEL: test_sub_v16i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = sub <16 x i8> %arg1, %arg2
@@ -12,7 +12,7 @@ define <16 x i8> @test_sub_v16i8(<16 x i
 
 define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; SKX-LABEL: test_sub_v8i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = sub <8 x i16> %arg1, %arg2
@@ -21,7 +21,7 @@ define <8 x i16> @test_sub_v8i16(<8 x i1
 
 define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; SKX-LABEL: test_sub_v4i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = sub <4 x i32> %arg1, %arg2
@@ -30,7 +30,7 @@ define <4 x i32> @test_sub_v4i32(<4 x i3
 
 define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
 ; SKX-LABEL: test_sub_v2i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; SKX-NEXT:    retq
   %ret = sub <2 x i64> %arg1, %arg2
@@ -39,7 +39,7 @@ define <2 x i64> @test_sub_v2i64(<2 x i6
 
 define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
 ; SKX-LABEL: test_sub_v32i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = sub <32 x i8> %arg1, %arg2
@@ -48,7 +48,7 @@ define <32 x i8> @test_sub_v32i8(<32 x i
 
 define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
 ; SKX-LABEL: test_sub_v16i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = sub <16 x i16> %arg1, %arg2
@@ -57,7 +57,7 @@ define <16 x i16> @test_sub_v16i16(<16 x
 
 define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
 ; SKX-LABEL: test_sub_v8i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = sub <8 x i32> %arg1, %arg2
@@ -66,7 +66,7 @@ define <8 x i32> @test_sub_v8i32(<8 x i3
 
 define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
 ; SKX-LABEL: test_sub_v4i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
 ; SKX-NEXT:    retq
   %ret = sub <4 x i64> %arg1, %arg2
@@ -75,7 +75,7 @@ define <4 x i64> @test_sub_v4i64(<4 x i6
 
 define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
 ; SKX-LABEL: test_sub_v64i8:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubb %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = sub <64 x i8> %arg1, %arg2
@@ -84,7 +84,7 @@ define <64 x i8> @test_sub_v64i8(<64 x i
 
 define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
 ; SKX-LABEL: test_sub_v32i16:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubw %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = sub <32 x i16> %arg1, %arg2
@@ -93,7 +93,7 @@ define <32 x i16> @test_sub_v32i16(<32 x
 
 define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
 ; SKX-LABEL: test_sub_v16i32:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubd %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = sub <16 x i32> %arg1, %arg2
@@ -102,7 +102,7 @@ define <16 x i32> @test_sub_v16i32(<16 x
 
 define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
 ; SKX-LABEL: test_sub_v8i64:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpsubq %zmm1, %zmm0, %zmm0
 ; SKX-NEXT:    retq
   %ret = sub <8 x i64> %arg1, %arg2

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/trunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/trunc.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i1 @trunc_i32toi1(i32 %a) {
 ; CHECK-LABEL: trunc_i32toi1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %r = trunc i32 %a to i1
@@ -12,7 +12,7 @@ define i1 @trunc_i32toi1(i32 %a) {
 
 define i8 @trunc_i32toi8(i32 %a) {
 ; CHECK-LABEL: trunc_i32toi8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %r = trunc i32 %a to i8
@@ -21,7 +21,7 @@ define i8 @trunc_i32toi8(i32 %a) {
 
 define i16 @trunc_i32toi16(i32 %a) {
 ; CHECK-LABEL: trunc_i32toi16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %r = trunc i32 %a to i16
@@ -30,7 +30,7 @@ define i16 @trunc_i32toi16(i32 %a) {
 
 define i8 @trunc_i64toi8(i64 %a) {
 ; CHECK-LABEL: trunc_i64toi8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %r = trunc i64 %a to i8
@@ -39,7 +39,7 @@ define i8 @trunc_i64toi8(i64 %a) {
 
 define i16 @trunc_i64toi16(i64 %a) {
 ; CHECK-LABEL: trunc_i64toi16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %r = trunc i64 %a to i16
@@ -48,7 +48,7 @@ define i16 @trunc_i64toi16(i64 %a) {
 
 define i32 @trunc_i64toi32(i64 %a) {
 ; CHECK-LABEL: trunc_i64toi32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
   %r = trunc i64 %a to i32

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/undef.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/undef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/undef.ll Mon Dec  4 09:18:51 2017
@@ -3,14 +3,14 @@
 
 define i8 @test() {
 ; ALL-LABEL: test:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   ret i8 undef
 }
 
 define i8 @test2(i8 %a) {
 ; ALL-LABEL: test2:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    addb %al, %dil
 ; ALL-NEXT:    movl %edi, %eax
 ; ALL-NEXT:    retq
@@ -21,14 +21,14 @@ define i8 @test2(i8 %a) {
 
 define float @test3() {
 ; ALL-LABEL: test3:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    retq
   ret float undef
 }
 
 define float @test4(float %a) {
 ; ALL-LABEL: test4:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    addss %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %r = fadd float %a, undef

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/xor-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/xor-scalar.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/xor-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/xor-scalar.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define i32 @test_xor_i1(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_xor_i1:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    cmpl %esi, %edi
 ; ALL-NEXT:    sete %al
 ; ALL-NEXT:    xorb %al, %al
@@ -18,7 +18,7 @@ define i32 @test_xor_i1(i32 %arg1, i32 %
 
 define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
 ; ALL-LABEL: test_xor_i8:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    xorb %dil, %sil
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -28,7 +28,7 @@ define i8 @test_xor_i8(i8 %arg1, i8 %arg
 
 define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
 ; ALL-LABEL: test_xor_i16:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    xorw %di, %si
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -38,7 +38,7 @@ define i16 @test_xor_i16(i16 %arg1, i16
 
 define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
 ; ALL-LABEL: test_xor_i32:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    xorl %edi, %esi
 ; ALL-NEXT:    movl %esi, %eax
 ; ALL-NEXT:    retq
@@ -48,7 +48,7 @@ define i32 @test_xor_i32(i32 %arg1, i32
 
 define i64 @test_xor_i64(i64 %arg1, i64 %arg2) {
 ; ALL-LABEL: test_xor_i64:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    xorq %rdi, %rsi
 ; ALL-NEXT:    movq %rsi, %rax
 ; ALL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll Mon Dec  4 09:18:51 2017
@@ -17,10 +17,10 @@ for.cond2:
   %cmp4 = icmp eq i32 %i.1, %v3
   %or.cond = or i1 %tobool, %cmp4
   br i1 %or.cond, label %for.inc20, label %for.inc, !prof !0
-; CHECK: BB#1: derived from LLVM BB %for.cond2
-; CHECK: Successors according to CFG: BB#3({{[0-9a-fx/= ]+}}1.53%) BB#4({{[0-9a-fx/= ]+}}98.47%)
-; CHECK: BB#4: derived from LLVM BB %for.cond2
-; CHECK: Successors according to CFG: BB#3({{[0-9a-fx/= ]+}}1.55%) BB#2({{[0-9a-fx/= ]+}}98.45%)
+; CHECK: %bb.1: derived from LLVM BB %for.cond2
+; CHECK: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}1.53%) %bb.4({{[0-9a-fx/= ]+}}98.47%)
+; CHECK: %bb.4: derived from LLVM BB %for.cond2
+; CHECK: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}1.55%) %bb.2({{[0-9a-fx/= ]+}}98.45%)
 
 for.inc:                                          ; preds = %for.cond2
   %shl = shl i32 %bit.0, 1

Modified: llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll (original)
+++ llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll Mon Dec  4 09:18:51 2017
@@ -8,10 +8,10 @@
 ; save 1,2,3 ... as one big integer.
 define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: merge_const_store:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB0_3
-; CHECK-NEXT:  # BB#1: # %.lr.ph.preheader
+; CHECK-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; CHECK-NEXT:    movabsq $578437695752307201, %rax # imm = 0x807060504030201
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_2: # %.lr.ph
@@ -54,7 +54,7 @@ define void @merge_const_store(i32 %coun
 ; No vectors because we use noimplicitfloat
 define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimplicitfloat{
 ; CHECK-LABEL: merge_const_store_no_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB1_2
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -101,10 +101,10 @@ define void @merge_const_store_no_vec(i3
 ; Move the constants using a single vector store.
 define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: merge_const_store_vec:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB2_3
-; CHECK-NEXT:  # BB#1: # %.lr.ph.preheader
+; CHECK-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB2_2: # %.lr.ph
@@ -148,7 +148,7 @@ define void @merge_const_store_vec(i32 %
 ; Move the first 4 constants as a single vector. Move the rest as scalars.
 define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: merge_nonconst_store:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB3_2
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -194,7 +194,7 @@ define void @merge_nonconst_store(i32 %c
 
 define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
 ; BWON-LABEL: merge_loads_i16:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    testl %edi, %edi
 ; BWON-NEXT:    jle .LBB4_2
 ; BWON-NEXT:    .p2align 4, 0x90
@@ -208,7 +208,7 @@ define void @merge_loads_i16(i32 %count,
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: merge_loads_i16:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    testl %edi, %edi
 ; BWOFF-NEXT:    jle .LBB4_2
 ; BWOFF-NEXT:    .p2align 4, 0x90
@@ -249,7 +249,7 @@ define void @merge_loads_i16(i32 %count,
 ; The loads and the stores are interleaved. Can't merge them.
 define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
 ; BWON-LABEL: no_merge_loads:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    testl %edi, %edi
 ; BWON-NEXT:    jle .LBB5_2
 ; BWON-NEXT:    .p2align 4, 0x90
@@ -266,7 +266,7 @@ define void @no_merge_loads(i32 %count,
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: no_merge_loads:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    testl %edi, %edi
 ; BWOFF-NEXT:    jle .LBB5_2
 ; BWOFF-NEXT:    .p2align 4, 0x90
@@ -309,7 +309,7 @@ a4:
 
 define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: merge_loads_integer:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB6_2
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -349,7 +349,7 @@ define void @merge_loads_integer(i32 %co
 
 define void @merge_loads_vector(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: merge_loads_vector:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB7_2
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -399,7 +399,7 @@ block4:
 ; On x86, even unaligned copies can be merged to vector ops.
 define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: merge_loads_no_align:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB8_2
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -450,7 +450,7 @@ block4:
 ; word (16 bit) instead of a byte copy.
 define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
 ; BWON-LABEL: MergeLoadStoreBaseIndexOffset:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    movl %ecx, %r8d
 ; BWON-NEXT:    xorl %ecx, %ecx
 ; BWON-NEXT:    .p2align 4, 0x90
@@ -461,11 +461,11 @@ define void @MergeLoadStoreBaseIndexOffs
 ; BWON-NEXT:    incq %rcx
 ; BWON-NEXT:    cmpl %ecx, %r8d
 ; BWON-NEXT:    jne .LBB9_1
-; BWON-NEXT:  # BB#2:
+; BWON-NEXT:  # %bb.2:
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: MergeLoadStoreBaseIndexOffset:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    movl %ecx, %r8d
 ; BWOFF-NEXT:    xorl %ecx, %ecx
 ; BWOFF-NEXT:    .p2align 4, 0x90
@@ -476,7 +476,7 @@ define void @MergeLoadStoreBaseIndexOffs
 ; BWOFF-NEXT:    incq %rcx
 ; BWOFF-NEXT:    cmpl %ecx, %r8d
 ; BWOFF-NEXT:    jne .LBB9_1
-; BWOFF-NEXT:  # BB#2:
+; BWOFF-NEXT:  # %bb.2:
 ; BWOFF-NEXT:    retq
   br label %1
 
@@ -507,7 +507,7 @@ define void @MergeLoadStoreBaseIndexOffs
 ; word (16 bit) instead of a byte copy for complicated address calculation.
 define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i64 %n) {
 ; BWON-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    xorl %r8d, %r8d
 ; BWON-NEXT:    .p2align 4, 0x90
 ; BWON-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
@@ -518,11 +518,11 @@ define void @MergeLoadStoreBaseIndexOffs
 ; BWON-NEXT:    addq $2, %r8
 ; BWON-NEXT:    cmpq %rcx, %r8
 ; BWON-NEXT:    jl .LBB10_1
-; BWON-NEXT:  # BB#2:
+; BWON-NEXT:  # %bb.2:
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    xorl %r8d, %r8d
 ; BWOFF-NEXT:    .p2align 4, 0x90
 ; BWOFF-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
@@ -533,7 +533,7 @@ define void @MergeLoadStoreBaseIndexOffs
 ; BWOFF-NEXT:    addq $2, %r8
 ; BWOFF-NEXT:    cmpq %rcx, %r8
 ; BWOFF-NEXT:    jl .LBB10_1
-; BWOFF-NEXT:  # BB#2:
+; BWOFF-NEXT:  # %bb.2:
 ; BWOFF-NEXT:    retq
   br label %1
 
@@ -566,7 +566,7 @@ define void @MergeLoadStoreBaseIndexOffs
 ; extensions.
 define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
 ; BWON-LABEL: MergeLoadStoreBaseIndexOffsetSext:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    movl %ecx, %r8d
 ; BWON-NEXT:    xorl %ecx, %ecx
 ; BWON-NEXT:    .p2align 4, 0x90
@@ -577,11 +577,11 @@ define void @MergeLoadStoreBaseIndexOffs
 ; BWON-NEXT:    incq %rcx
 ; BWON-NEXT:    cmpl %ecx, %r8d
 ; BWON-NEXT:    jne .LBB11_1
-; BWON-NEXT:  # BB#2:
+; BWON-NEXT:  # %bb.2:
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: MergeLoadStoreBaseIndexOffsetSext:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    movl %ecx, %r8d
 ; BWOFF-NEXT:    xorl %ecx, %ecx
 ; BWOFF-NEXT:    .p2align 4, 0x90
@@ -592,7 +592,7 @@ define void @MergeLoadStoreBaseIndexOffs
 ; BWOFF-NEXT:    incq %rcx
 ; BWOFF-NEXT:    cmpl %ecx, %r8d
 ; BWOFF-NEXT:    jne .LBB11_1
-; BWOFF-NEXT:  # BB#2:
+; BWOFF-NEXT:  # %bb.2:
 ; BWOFF-NEXT:    retq
   br label %1
 
@@ -624,7 +624,7 @@ define void @MergeLoadStoreBaseIndexOffs
 ; computations;
 define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
 ; BWON-LABEL: loadStoreBaseIndexOffsetSextNoSex:
-; BWON:       # BB#0:
+; BWON:       # %bb.0:
 ; BWON-NEXT:    movl %ecx, %r8d
 ; BWON-NEXT:    xorl %ecx, %ecx
 ; BWON-NEXT:    .p2align 4, 0x90
@@ -639,11 +639,11 @@ define void @loadStoreBaseIndexOffsetSex
 ; BWON-NEXT:    incq %rcx
 ; BWON-NEXT:    cmpl %ecx, %r8d
 ; BWON-NEXT:    jne .LBB12_1
-; BWON-NEXT:  # BB#2:
+; BWON-NEXT:  # %bb.2:
 ; BWON-NEXT:    retq
 ;
 ; BWOFF-LABEL: loadStoreBaseIndexOffsetSextNoSex:
-; BWOFF:       # BB#0:
+; BWOFF:       # %bb.0:
 ; BWOFF-NEXT:    movl %ecx, %r8d
 ; BWOFF-NEXT:    xorl %ecx, %ecx
 ; BWOFF-NEXT:    .p2align 4, 0x90
@@ -658,7 +658,7 @@ define void @loadStoreBaseIndexOffsetSex
 ; BWOFF-NEXT:    incq %rcx
 ; BWOFF-NEXT:    cmpl %ecx, %r8d
 ; BWOFF-NEXT:    jne .LBB12_1
-; BWOFF-NEXT:  # BB#2:
+; BWOFF-NEXT:  # %bb.2:
 ; BWOFF-NEXT:    retq
   br label %1
 
@@ -690,7 +690,7 @@ define void @loadStoreBaseIndexOffsetSex
 ; PR21711 ( http://llvm.org/bugs/show_bug.cgi?id=21711 )
 define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
 ; CHECK-LABEL: merge_vec_element_store:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovups %ymm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -725,7 +725,7 @@ define void @merge_vec_element_store(<8
 ; These should be merged into 32-byte stores.
 define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x float>* %ptr) {
 ; CHECK-LABEL: merge_vec_extract_stores:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovups %ymm0, 48(%rdi)
 ; CHECK-NEXT:    vmovups %ymm1, 80(%rdi)
 ; CHECK-NEXT:    vzeroupper
@@ -749,7 +749,7 @@ define void @merge_vec_extract_stores(<8
 ; Merging vector stores when sourced from vector loads.
 define void @merge_vec_stores_from_loads(<4 x float>* %v, <4 x float>* %ptr) {
 ; CHECK-LABEL: merge_vec_stores_from_loads:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %ymm0
 ; CHECK-NEXT:    vmovups %ymm0, (%rsi)
 ; CHECK-NEXT:    vzeroupper
@@ -769,7 +769,7 @@ define void @merge_vec_stores_from_loads
 ; Merging vector stores when sourced from a constant vector is not currently handled.
 define void @merge_vec_stores_of_constants(<4 x i32>* %ptr) {
 ; CHECK-LABEL: merge_vec_stores_of_constants:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovaps %xmm0, 48(%rdi)
 ; CHECK-NEXT:    vmovaps %xmm0, 64(%rdi)
@@ -786,7 +786,7 @@ define void @merge_vec_stores_of_constan
 ; This should now be merged.
 define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
 ; CHECK-LABEL: merge_vec_element_and_scalar_load:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovups (%rdi), %xmm0
 ; CHECK-NEXT:    vmovups %xmm0, 32(%rdi)
 ; CHECK-NEXT:    retq
@@ -809,7 +809,7 @@ define void @merge_vec_element_and_scala
 ; Don't let a non-consecutive store thwart merging of the last two.
 define void @almost_consecutive_stores(i8* %p) {
 ; CHECK-LABEL: almost_consecutive_stores:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movb $0, (%rdi)
 ; CHECK-NEXT:    movb $1, 42(%rdi)
 ; CHECK-NEXT:    movw $770, 2(%rdi) # imm = 0x302
@@ -827,7 +827,7 @@ define void @almost_consecutive_stores(i
 ; We should be able to merge these.
 define void @merge_bitcast(<4 x i32> %v, float* %ptr) {
 ; CHECK-LABEL: merge_bitcast:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovups %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
   %fv = bitcast <4 x i32> %v to <4 x float>

Modified: llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll (original)
+++ llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll Mon Dec  4 09:18:51 2017
@@ -5,7 +5,7 @@
 
 define void @pull_bitcast(<4 x i8>* %pA, <4 x i8>* %pB) {
 ; CHECK-LABEL: pull_bitcast:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl (%rsi), %eax
 ; CHECK-NEXT:    xorl %eax, (%rdi)
 ; CHECK-NEXT:    retq
@@ -18,7 +18,7 @@ define void @pull_bitcast(<4 x i8>* %pA,
 
 define <4 x i32> @multi_use_swizzle(<4 x i32>* %pA, <4 x i32>* %pB) {
 ; CHECK-LABEL: multi_use_swizzle:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %xmm0
 ; CHECK-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],mem[1,2]
 ; CHECK-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[1,3,2,2]
@@ -36,7 +36,7 @@ define <4 x i32> @multi_use_swizzle(<4 x
 
 define <4 x i8> @pull_bitcast2(<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
 ; CHECK-LABEL: pull_bitcast2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl (%rdi), %eax
 ; CHECK-NEXT:    movl %eax, (%rdx)
 ; CHECK-NEXT:    xorl (%rsi), %eax
@@ -53,7 +53,7 @@ define <4 x i8> @pull_bitcast2(<4 x i8>*
 
 define <4 x i32> @reverse_1(<4 x i32>* %pA, <4 x i32>* %pB) {
 ; CHECK-LABEL: reverse_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %xmm0
 ; CHECK-NEXT:    retq
   %A = load <4 x i32>, <4 x i32>* %pA
@@ -65,7 +65,7 @@ define <4 x i32> @reverse_1(<4 x i32>* %
 
 define <4 x i32> @no_reverse_shuff(<4 x i32>* %pA, <4 x i32>* %pB) {
 ; CHECK-LABEL: no_reverse_shuff:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
 ; CHECK-NEXT:    retq
   %A = load <4 x i32>, <4 x i32>* %pA

Modified: llvm/trunk/test/CodeGen/X86/TruncAssertSext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/TruncAssertSext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/TruncAssertSext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/TruncAssertSext.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @main(i64 %a) {
 ; CHECK-LABEL: main:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    orq $-2, %rdi
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/TruncAssertZext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/TruncAssertZext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/TruncAssertZext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/TruncAssertZext.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @foo() {
 ; CHECK-LABEL: foo:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    retq
   ret i64 -1
@@ -14,7 +14,7 @@ define i64 @foo() {
 
 define i64 @main() {
 ; CHECK-LABEL: main:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    callq foo

Modified: llvm/trunk/test/CodeGen/X86/WidenArith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/WidenArith.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/WidenArith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/WidenArith.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
 ; X86-LABEL: test:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vaddps %ymm1, %ymm0, %ymm2
 ; X86-NEXT:    vmulps %ymm0, %ymm1, %ymm1
 ; X86-NEXT:    vsubps %ymm2, %ymm1, %ymm3
@@ -15,7 +15,7 @@ define <8 x i32> @test(<8 x float> %a, <
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vaddps %ymm1, %ymm0, %ymm2
 ; X64-NEXT:    vmulps %ymm0, %ymm1, %ymm1
 ; X64-NEXT:    vsubps %ymm2, %ymm1, %ymm3

Modified: llvm/trunk/test/CodeGen/X86/add-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add-ext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add-ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add-ext.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define i64 @add_nsw_consts(i32 %i) {
 ; CHECK-LABEL: add_nsw_consts:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    addq $12, %rax
 ; CHECK-NEXT:    retq
@@ -24,7 +24,7 @@ define i64 @add_nsw_consts(i32 %i) {
 
 define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
 ; CHECK-LABEL: add_nsw_sext_add:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    leaq 5(%rsi,%rax), %rax
 ; CHECK-NEXT:    retq
@@ -40,7 +40,7 @@ define i64 @add_nsw_sext_add(i32 %i, i64
 
 define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
 ; CHECK-LABEL: add_nsw_sext_lsh_add:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    leaq -40(%rsi,%rax,8), %rax
 ; CHECK-NEXT:    retq
@@ -57,7 +57,7 @@ define i64 @add_nsw_sext_lsh_add(i32 %i,
 
 define i64 @add_nsw_sext(i32 %i, i64 %x) {
 ; CHECK-LABEL: add_nsw_sext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addl $5, %edi
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    retq
@@ -71,7 +71,7 @@ define i64 @add_nsw_sext(i32 %i, i64 %x)
 
 define i8* @gep8(i32 %i, i8* %x) {
 ; CHECK-LABEL: gep8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    leaq 5(%rsi,%rax), %rax
 ; CHECK-NEXT:    retq
@@ -84,7 +84,7 @@ define i8* @gep8(i32 %i, i8* %x) {
 
 define i16* @gep16(i32 %i, i16* %x) {
 ; CHECK-LABEL: gep16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    leaq -10(%rsi,%rax,2), %rax
 ; CHECK-NEXT:    retq
@@ -97,7 +97,7 @@ define i16* @gep16(i32 %i, i16* %x) {
 
 define i32* @gep32(i32 %i, i32* %x) {
 ; CHECK-LABEL: gep32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    leaq 20(%rsi,%rax,4), %rax
 ; CHECK-NEXT:    retq
@@ -110,7 +110,7 @@ define i32* @gep32(i32 %i, i32* %x) {
 
 define i64* @gep64(i32 %i, i64* %x) {
 ; CHECK-LABEL: gep64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    leaq -40(%rsi,%rax,8), %rax
 ; CHECK-NEXT:    retq
@@ -125,7 +125,7 @@ define i64* @gep64(i32 %i, i64* %x) {
 
 define i128* @gep128(i32 %i, i128* %x) {
 ; CHECK-LABEL: gep128:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %edi, %rax
 ; CHECK-NEXT:    shlq $4, %rax
 ; CHECK-NEXT:    leaq 80(%rsi,%rax), %rax
@@ -143,7 +143,7 @@ define i128* @gep128(i32 %i, i128* %x) {
 
 define void @PR20134(i32* %a, i32 %i) {
 ; CHECK-LABEL: PR20134:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movslq %esi, %rax
 ; CHECK-NEXT:    movl 4(%rdi,%rax,4), %ecx
 ; CHECK-NEXT:    addl 8(%rdi,%rax,4), %ecx
@@ -169,7 +169,7 @@ define void @PR20134(i32* %a, i32 %i) {
 
 ; The same as @PR20134 but sign extension is replaced with zero extension
 define void @PR20134_zext(i32* %a, i32 %i) {
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
 ; CHECK-NEXT: movl %esi, %eax
 ; CHECK-NEXT: movl 4(%rdi,%rax,4), %ecx
 ; CHECK-NEXT: addl 8(%rdi,%rax,4), %ecx

Modified: llvm/trunk/test/CodeGen/X86/add-of-carry.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add-of-carry.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add-of-carry.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add-of-carry.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define i32 @test1(i32 %sum, i32 %x) nounwind readnone ssp {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl %eax, %edx
@@ -26,7 +26,7 @@ define i32 @test1(i32 %sum, i32 %x) noun
 
 define i32 @test2(i32 %x, i32 %y, i32 %res) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx

Modified: llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define i8 @PR30841(i64 %argc) {
 ; CHECK-LABEL: PR30841:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    negl %eax
 ; CHECK-NEXT:    ## kill: %al<def> %al<kill> %eax<kill>

Modified: llvm/trunk/test/CodeGen/X86/add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add.ll Mon Dec  4 09:18:51 2017
@@ -10,18 +10,18 @@ declare {i32, i1} @llvm.uadd.with.overfl
 ; instruction is a sub instead of an add.
 define i32 @test1(i32 inreg %a) nounwind {
 ; X32-LABEL: test1:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    subl $-128, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test1:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    subl $-128, %edi
 ; X64-LINUX-NEXT:    movl %edi, %eax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test1:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    subl $-128, %ecx
 ; X64-WIN32-NEXT:    movl %ecx, %eax
 ; X64-WIN32-NEXT:    retq
@@ -31,19 +31,19 @@ entry:
 }
 define i64 @test2(i64 inreg %a) nounwind {
 ; X32-LABEL: test2:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
 ; X32-NEXT:    adcl $0, %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test2:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    subq $-2147483648, %rdi # imm = 0x80000000
 ; X64-LINUX-NEXT:    movq %rdi, %rax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test2:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    subq $-2147483648, %rcx # imm = 0x80000000
 ; X64-WIN32-NEXT:    movq %rcx, %rax
 ; X64-WIN32-NEXT:    retq
@@ -53,19 +53,19 @@ entry:
 }
 define i64 @test3(i64 inreg %a) nounwind {
 ; X32-LABEL: test3:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    addl $128, %eax
 ; X32-NEXT:    adcl $0, %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test3:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    subq $-128, %rdi
 ; X64-LINUX-NEXT:    movq %rdi, %rax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test3:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    subq $-128, %rcx
 ; X64-WIN32-NEXT:    movq %rcx, %rax
 ; X64-WIN32-NEXT:    retq
@@ -76,11 +76,11 @@ entry:
 
 define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
 ; X32-LABEL: test4:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    jo .LBB3_2
-; X32-NEXT:  # BB#1: # %normal
+; X32-NEXT:  # %bb.1: # %normal
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $0, (%eax)
 ; X32-NEXT:  .LBB3_2: # %overflow
@@ -88,20 +88,20 @@ define i1 @test4(i32 %v1, i32 %v2, i32*
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test4:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    addl %esi, %edi
 ; X64-LINUX-NEXT:    jo .LBB3_2
-; X64-LINUX-NEXT:  # BB#1: # %normal
+; X64-LINUX-NEXT:  # %bb.1: # %normal
 ; X64-LINUX-NEXT:    movl $0, (%rdx)
 ; X64-LINUX-NEXT:  .LBB3_2: # %overflow
 ; X64-LINUX-NEXT:    xorl %eax, %eax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test4:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    addl %edx, %ecx
 ; X64-WIN32-NEXT:    jo .LBB3_2
-; X64-WIN32-NEXT:  # BB#1: # %normal
+; X64-WIN32-NEXT:  # %bb.1: # %normal
 ; X64-WIN32-NEXT:    movl $0, (%r8)
 ; X64-WIN32-NEXT:  .LBB3_2: # %overflow
 ; X64-WIN32-NEXT:    xorl %eax, %eax
@@ -122,11 +122,11 @@ overflow:
 
 define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
 ; X32-LABEL: test5:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    jb .LBB4_2
-; X32-NEXT:  # BB#1: # %normal
+; X32-NEXT:  # %bb.1: # %normal
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl $0, (%eax)
 ; X32-NEXT:  .LBB4_2: # %carry
@@ -134,20 +134,20 @@ define i1 @test5(i32 %v1, i32 %v2, i32*
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test5:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    addl %esi, %edi
 ; X64-LINUX-NEXT:    jb .LBB4_2
-; X64-LINUX-NEXT:  # BB#1: # %normal
+; X64-LINUX-NEXT:  # %bb.1: # %normal
 ; X64-LINUX-NEXT:    movl $0, (%rdx)
 ; X64-LINUX-NEXT:  .LBB4_2: # %carry
 ; X64-LINUX-NEXT:    xorl %eax, %eax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test5:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    addl %edx, %ecx
 ; X64-WIN32-NEXT:    jb .LBB4_2
-; X64-WIN32-NEXT:  # BB#1: # %normal
+; X64-WIN32-NEXT:  # %bb.1: # %normal
 ; X64-WIN32-NEXT:    movl $0, (%r8)
 ; X64-WIN32-NEXT:  .LBB4_2: # %carry
 ; X64-WIN32-NEXT:    xorl %eax, %eax
@@ -168,21 +168,21 @@ carry:
 
 define i64 @test6(i64 %A, i32 %B) nounwind {
 ; X32-LABEL: test6:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test6:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; X64-LINUX-NEXT:    shlq $32, %rsi
 ; X64-LINUX-NEXT:    leaq (%rsi,%rdi), %rax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test6:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    # kill: %edx<def> %edx<kill> %rdx<def>
 ; X64-WIN32-NEXT:    shlq $32, %rdx
 ; X64-WIN32-NEXT:    leaq (%rdx,%rcx), %rax
@@ -196,21 +196,21 @@ entry:
 
 define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
 ; X32-LABEL: test7:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    setb %dl
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test7:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    addl %esi, %edi
 ; X64-LINUX-NEXT:    setb %dl
 ; X64-LINUX-NEXT:    movl %edi, %eax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test7:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    addl %edx, %ecx
 ; X64-WIN32-NEXT:    setb %dl
 ; X64-WIN32-NEXT:    movl %ecx, %eax
@@ -223,7 +223,7 @@ entry:
 ; PR5443
 define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
 ; X32-LABEL: test8:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
@@ -232,14 +232,14 @@ define {i64, i1} @test8(i64 %left, i64 %
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test8:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    addq %rsi, %rdi
 ; X64-LINUX-NEXT:    setb %dl
 ; X64-LINUX-NEXT:    movq %rdi, %rax
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test8:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    addq %rdx, %rcx
 ; X64-WIN32-NEXT:    setb %dl
 ; X64-WIN32-NEXT:    movq %rcx, %rax
@@ -258,7 +258,7 @@ entry:
 
 define i32 @test9(i32 %x, i32 %y) nounwind readnone {
 ; X32-LABEL: test9:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl %ecx, %ecx
 ; X32-NEXT:    cmpl $10, {{[0-9]+}}(%esp)
@@ -267,7 +267,7 @@ define i32 @test9(i32 %x, i32 %y) nounwi
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test9:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    xorl %eax, %eax
 ; X64-LINUX-NEXT:    cmpl $10, %edi
 ; X64-LINUX-NEXT:    sete %al
@@ -276,7 +276,7 @@ define i32 @test9(i32 %x, i32 %y) nounwi
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test9:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    xorl %eax, %eax
 ; X64-WIN32-NEXT:    cmpl $10, %ecx
 ; X64-WIN32-NEXT:    sete %al
@@ -292,20 +292,20 @@ entry:
 
 define i1 @test10(i32 %x) nounwind {
 ; X32-LABEL: test10:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    incl %eax
 ; X32-NEXT:    seto %al
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test10:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    incl %edi
 ; X64-LINUX-NEXT:    seto %al
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test10:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    incl %ecx
 ; X64-WIN32-NEXT:    seto %al
 ; X64-WIN32-NEXT:    retq
@@ -317,17 +317,17 @@ entry:
 
 define void @test11(i32* inreg %a) nounwind {
 ; X32-LABEL: test11:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    subl $-128, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test11:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    subl $-128, (%rdi)
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test11:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    subl $-128, (%rcx)
 ; X64-WIN32-NEXT:    retq
 entry:
@@ -339,18 +339,18 @@ entry:
 
 define void @test12(i64* inreg %a) nounwind {
 ; X32-LABEL: test12:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    addl $-2147483648, (%eax) # imm = 0x80000000
 ; X32-NEXT:    adcl $0, 4(%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test12:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    subq $-2147483648, (%rdi) # imm = 0x80000000
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test12:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    subq $-2147483648, (%rcx) # imm = 0x80000000
 ; X64-WIN32-NEXT:    retq
 entry:
@@ -362,18 +362,18 @@ entry:
 
 define void @test13(i64* inreg %a) nounwind {
 ; X32-LABEL: test13:
-; X32:       # BB#0: # %entry
+; X32:       # %bb.0: # %entry
 ; X32-NEXT:    addl $128, (%eax)
 ; X32-NEXT:    adcl $0, 4(%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LINUX-LABEL: test13:
-; X64-LINUX:       # BB#0: # %entry
+; X64-LINUX:       # %bb.0: # %entry
 ; X64-LINUX-NEXT:    subq $-128, (%rdi)
 ; X64-LINUX-NEXT:    retq
 ;
 ; X64-WIN32-LABEL: test13:
-; X64-WIN32:       # BB#0: # %entry
+; X64-WIN32:       # %bb.0: # %entry
 ; X64-WIN32-NEXT:    subq $-128, (%rcx)
 ; X64-WIN32-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/addcarry.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/addcarry.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/addcarry.ll (original)
+++ llvm/trunk/test/CodeGen/X86/addcarry.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b, i64 %c) nounwind {
 ; CHECK-LABEL: a:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq %rcx, %rdx
 ; CHECK-NEXT:    adcq $0, %r8
 ; CHECK-NEXT:    movq %r8, (%rdi)
@@ -26,7 +26,7 @@ entry:
 
 define void @b(i32* nocapture %r, i64 %a, i64 %b, i32 %c) nounwind {
 ; CHECK-LABEL: b:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq %rdx, %rsi
 ; CHECK-NEXT:    adcl $0, %ecx
 ; CHECK-NEXT:    movl %ecx, (%rdi)
@@ -45,7 +45,7 @@ entry:
 
 define void @c(i16* nocapture %r, i64 %a, i64 %b, i16 %c) nounwind {
 ; CHECK-LABEL: c:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq %rdx, %rsi
 ; CHECK-NEXT:    adcw $0, %cx
 ; CHECK-NEXT:    movw %cx, (%rdi)
@@ -64,7 +64,7 @@ entry:
 
 define void @d(i8* nocapture %r, i64 %a, i64 %b, i8 %c) nounwind {
 ; CHECK-LABEL: d:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq %rdx, %rsi
 ; CHECK-NEXT:    adcb $0, %cl
 ; CHECK-NEXT:    movb %cl, (%rdi)
@@ -83,7 +83,7 @@ entry:
 
 define i8 @e(i32* nocapture %a, i32 %b) nounwind {
 ; CHECK-LABEL: e:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %esi<def> %esi<kill> %rsi<def>
 ; CHECK-NEXT:    movl (%rdi), %ecx
 ; CHECK-NEXT:    leal (%rsi,%rcx), %edx
@@ -109,7 +109,7 @@ define i8 @e(i32* nocapture %a, i32 %b)
 
 define %scalar @pr31719(%scalar* nocapture readonly %this, %scalar %arg.b) {
 ; CHECK-LABEL: pr31719:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq (%rsi), %rdx
 ; CHECK-NEXT:    adcq 8(%rsi), %rcx
 ; CHECK-NEXT:    adcq 16(%rsi), %r8
@@ -168,7 +168,7 @@ entry:
 
 define void @muladd(%accumulator* nocapture %this, i64 %arg.a, i64 %arg.b) {
 ; CHECK-LABEL: muladd:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq %rdx, %rax
 ; CHECK-NEXT:    mulq %rsi
 ; CHECK-NEXT:    addq %rax, (%rdi)
@@ -205,7 +205,7 @@ entry:
 
 define i64 @shiftadd(i64 %a, i64 %b, i64 %c, i64 %d) {
 ; CHECK-LABEL: shiftadd:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq %rsi, %rdi
 ; CHECK-NEXT:    adcq %rcx, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
@@ -225,7 +225,7 @@ entry:
 
 define %S @readd(%S* nocapture readonly %this, %S %arg.b) {
 ; CHECK-LABEL: readd:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq (%rsi), %rdx
 ; CHECK-NEXT:    movq 8(%rsi), %r10
 ; CHECK-NEXT:    adcq $0, %r10

Modified: llvm/trunk/test/CodeGen/X86/adx-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/adx-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/adx-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/adx-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@ declare i8 @llvm.x86.addcarryx.u32(i8, i
 
 define i8 @test_addcarryx_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
 ; NOADX-LABEL: test_addcarryx_u32:
-; NOADX:       ## BB#0:
+; NOADX:       ## %bb.0:
 ; NOADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; NOADX-NEXT:    adcl %edx, %esi ## encoding: [0x11,0xd6]
 ; NOADX-NEXT:    movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -14,7 +14,7 @@ define i8 @test_addcarryx_u32(i8 %c, i32
 ; NOADX-NEXT:    retq ## encoding: [0xc3]
 ;
 ; ADX-LABEL: test_addcarryx_u32:
-; ADX:       ## BB#0:
+; ADX:       ## %bb.0:
 ; ADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; ADX-NEXT:    adcxl %edx, %esi ## encoding: [0x66,0x0f,0x38,0xf6,0xf2]
 ; ADX-NEXT:    movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -28,7 +28,7 @@ declare i8 @llvm.x86.addcarryx.u64(i8, i
 
 define i8 @test_addcarryx_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
 ; NOADX-LABEL: test_addcarryx_u64:
-; NOADX:       ## BB#0:
+; NOADX:       ## %bb.0:
 ; NOADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; NOADX-NEXT:    adcq %rdx, %rsi ## encoding: [0x48,0x11,0xd6]
 ; NOADX-NEXT:    movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -36,7 +36,7 @@ define i8 @test_addcarryx_u64(i8 %c, i64
 ; NOADX-NEXT:    retq ## encoding: [0xc3]
 ;
 ; ADX-LABEL: test_addcarryx_u64:
-; ADX:       ## BB#0:
+; ADX:       ## %bb.0:
 ; ADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; ADX-NEXT:    adcxq %rdx, %rsi ## encoding: [0x66,0x48,0x0f,0x38,0xf6,0xf2]
 ; ADX-NEXT:    movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -50,7 +50,7 @@ declare i8 @llvm.x86.addcarry.u32(i8, i3
 
 define i8 @test_addcarry_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
 ; NOADX-LABEL: test_addcarry_u32:
-; NOADX:       ## BB#0:
+; NOADX:       ## %bb.0:
 ; NOADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; NOADX-NEXT:    adcl %edx, %esi ## encoding: [0x11,0xd6]
 ; NOADX-NEXT:    movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -58,7 +58,7 @@ define i8 @test_addcarry_u32(i8 %c, i32
 ; NOADX-NEXT:    retq ## encoding: [0xc3]
 ;
 ; ADX-LABEL: test_addcarry_u32:
-; ADX:       ## BB#0:
+; ADX:       ## %bb.0:
 ; ADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; ADX-NEXT:    adcxl %edx, %esi ## encoding: [0x66,0x0f,0x38,0xf6,0xf2]
 ; ADX-NEXT:    movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -72,7 +72,7 @@ declare i8 @llvm.x86.addcarry.u64(i8, i6
 
 define i8 @test_addcarry_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
 ; NOADX-LABEL: test_addcarry_u64:
-; NOADX:       ## BB#0:
+; NOADX:       ## %bb.0:
 ; NOADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; NOADX-NEXT:    adcq %rdx, %rsi ## encoding: [0x48,0x11,0xd6]
 ; NOADX-NEXT:    movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -80,7 +80,7 @@ define i8 @test_addcarry_u64(i8 %c, i64
 ; NOADX-NEXT:    retq ## encoding: [0xc3]
 ;
 ; ADX-LABEL: test_addcarry_u64:
-; ADX:       ## BB#0:
+; ADX:       ## %bb.0:
 ; ADX-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; ADX-NEXT:    adcxq %rdx, %rsi ## encoding: [0x66,0x48,0x0f,0x38,0xf6,0xf2]
 ; ADX-NEXT:    movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -94,7 +94,7 @@ declare i8 @llvm.x86.subborrow.u32(i8, i
 
 define i8 @test_subborrow_u32(i8 %c, i32 %a, i32 %b, i8* %ptr) {
 ; CHECK-LABEL: test_subborrow_u32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; CHECK-NEXT:    sbbl %edx, %esi ## encoding: [0x19,0xd6]
 ; CHECK-NEXT:    movl %esi, (%rcx) ## encoding: [0x89,0x31]
@@ -108,7 +108,7 @@ declare i8 @llvm.x86.subborrow.u64(i8, i
 
 define i8 @test_subborrow_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
 ; CHECK-LABEL: test_subborrow_u64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    addb $-1, %dil ## encoding: [0x40,0x80,0xc7,0xff]
 ; CHECK-NEXT:    sbbq %rdx, %rsi ## encoding: [0x48,0x19,0xd6]
 ; CHECK-NEXT:    movq %rsi, (%rcx) ## encoding: [0x48,0x89,0x31]
@@ -121,7 +121,7 @@ define i8 @test_subborrow_u64(i8 %c, i64
 ; Try a version with loads. Previously we crashed on this.
 define i32 @load_crash(i64* nocapture readonly %a, i64* nocapture readonly %b, i64* %res)  {
 ; NOADX-LABEL: load_crash:
-; NOADX:       ## BB#0:
+; NOADX:       ## %bb.0:
 ; NOADX-NEXT:    movq (%rdi), %rax ## encoding: [0x48,0x8b,0x07]
 ; NOADX-NEXT:    xorl %ecx, %ecx ## encoding: [0x31,0xc9]
 ; NOADX-NEXT:    addb $-1, %cl ## encoding: [0x80,0xc1,0xff]
@@ -132,7 +132,7 @@ define i32 @load_crash(i64* nocapture re
 ; NOADX-NEXT:    retq ## encoding: [0xc3]
 ;
 ; ADX-LABEL: load_crash:
-; ADX:       ## BB#0:
+; ADX:       ## %bb.0:
 ; ADX-NEXT:    movq (%rdi), %rax ## encoding: [0x48,0x8b,0x07]
 ; ADX-NEXT:    xorl %ecx, %ecx ## encoding: [0x31,0xc9]
 ; ADX-NEXT:    addb $-1, %cl ## encoding: [0x80,0xc1,0xff]
@@ -152,7 +152,7 @@ define i32 @load_crash(i64* nocapture re
 ; Try a really simple all zero input case, which also used to crash
 define void @allzeros() {
 ; CHECK-LABEL: allzeros:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
 ; CHECK-NEXT:    addb $-1, %al ## encoding: [0x04,0xff]
 ; CHECK-NEXT:    sbbq %rax, %rax ## encoding: [0x48,0x19,0xc0]

Modified: llvm/trunk/test/CodeGen/X86/aes-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/aes-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/aes-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/aes-schedule.ll Mon Dec  4 09:18:51 2017
@@ -12,49 +12,49 @@
 
 define <2 x i64> @test_aesdec(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; GENERIC-LABEL: test_aesdec:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    aesdec %xmm1, %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    aesdec (%rdi), %xmm0 # sched: [13:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SLM-LABEL: test_aesdec:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    aesdec %xmm1, %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    aesdec (%rdi), %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aesdec:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_aesdec:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_aesdec:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_aesdec:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_aesdec:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aesdec:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -67,49 +67,49 @@ declare <2 x i64> @llvm.x86.aesni.aesdec
 
 define <2 x i64> @test_aesdeclast(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; GENERIC-LABEL: test_aesdeclast:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    aesdeclast %xmm1, %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    aesdeclast (%rdi), %xmm0 # sched: [13:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SLM-LABEL: test_aesdeclast:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    aesdeclast %xmm1, %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    aesdeclast (%rdi), %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aesdeclast:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_aesdeclast:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_aesdeclast:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_aesdeclast:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_aesdeclast:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aesdeclast:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -122,49 +122,49 @@ declare <2 x i64> @llvm.x86.aesni.aesdec
 
 define <2 x i64> @test_aesenc(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; GENERIC-LABEL: test_aesenc:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    aesenc %xmm1, %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    aesenc (%rdi), %xmm0 # sched: [13:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SLM-LABEL: test_aesenc:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    aesenc %xmm1, %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    aesenc (%rdi), %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aesenc:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_aesenc:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_aesenc:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_aesenc:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_aesenc:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aesenc:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -177,49 +177,49 @@ declare <2 x i64> @llvm.x86.aesni.aesenc
 
 define <2 x i64> @test_aesenclast(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; GENERIC-LABEL: test_aesenclast:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    aesenclast %xmm1, %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    aesenclast (%rdi), %xmm0 # sched: [13:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SLM-LABEL: test_aesenclast:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    aesenclast %xmm1, %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    aesenclast (%rdi), %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aesenclast:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [13:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_aesenclast:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_aesenclast:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_aesenclast:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_aesenclast:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aesenclast:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -232,56 +232,56 @@ declare <2 x i64> @llvm.x86.aesni.aesenc
 
 define <2 x i64> @test_aesimc(<2 x i64> %a0, <2 x i64> *%a1) {
 ; GENERIC-LABEL: test_aesimc:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    aesimc %xmm0, %xmm1 # sched: [12:2.00]
 ; GENERIC-NEXT:    aesimc (%rdi), %xmm0 # sched: [18:2.00]
 ; GENERIC-NEXT:    por %xmm1, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SLM-LABEL: test_aesimc:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    aesimc %xmm0, %xmm1 # sched: [8:5.00]
 ; SLM-NEXT:    aesimc (%rdi), %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    por %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aesimc:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaesimc %xmm0, %xmm0 # sched: [12:2.00]
 ; SANDY-NEXT:    vaesimc (%rdi), %xmm1 # sched: [18:2.00]
 ; SANDY-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_aesimc:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaesimc %xmm0, %xmm0 # sched: [14:2.00]
 ; HASWELL-NEXT:    vaesimc (%rdi), %xmm1 # sched: [14:2.00]
 ; HASWELL-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_aesimc:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaesimc %xmm0, %xmm0 # sched: [14:2.00]
 ; BROADWELL-NEXT:    vaesimc (%rdi), %xmm1 # sched: [19:2.00]
 ; BROADWELL-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_aesimc:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaesimc %xmm0, %xmm0 # sched: [8:2.00]
 ; SKYLAKE-NEXT:    vaesimc (%rdi), %xmm1 # sched: [14:2.00]
 ; SKYLAKE-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_aesimc:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaesimc (%rdi), %xmm1 # sched: [7:1.00]
 ; BTVER2-NEXT:    vaesimc %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aesimc:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaesimc (%rdi), %xmm1 # sched: [11:0.50]
 ; ZNVER1-NEXT:    vaesimc %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -296,56 +296,56 @@ declare <2 x i64> @llvm.x86.aesni.aesimc
 
 define <2 x i64> @test_aeskeygenassist(<2 x i64> %a0, <2 x i64> *%a1) {
 ; GENERIC-LABEL: test_aeskeygenassist:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    aeskeygenassist $7, %xmm0, %xmm1 # sched: [8:3.67]
 ; GENERIC-NEXT:    aeskeygenassist $7, (%rdi), %xmm0 # sched: [8:3.33]
 ; GENERIC-NEXT:    por %xmm1, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SLM-LABEL: test_aeskeygenassist:
-; SLM:       # BB#0:
+; SLM:       # %bb.0:
 ; SLM-NEXT:    aeskeygenassist $7, %xmm0, %xmm1 # sched: [8:5.00]
 ; SLM-NEXT:    aeskeygenassist $7, (%rdi), %xmm0 # sched: [8:5.00]
 ; SLM-NEXT:    por %xmm1, %xmm0 # sched: [1:0.50]
 ; SLM-NEXT:    retq # sched: [4:1.00]
 ;
 ; SANDY-LABEL: test_aeskeygenassist:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [8:3.67]
 ; SANDY-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [8:3.33]
 ; SANDY-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_aeskeygenassist:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [29:7.00]
 ; HASWELL-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [28:7.00]
 ; HASWELL-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_aeskeygenassist:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [29:7.00]
 ; BROADWELL-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [33:7.00]
 ; BROADWELL-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_aeskeygenassist:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [20:6.00]
 ; SKYLAKE-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [25:6.00]
 ; SKYLAKE-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_aeskeygenassist:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [7:1.00]
 ; BTVER2-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_aeskeygenassist:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [11:0.50]
 ; ZNVER1-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]

Modified: llvm/trunk/test/CodeGen/X86/aes_intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/aes_intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/aes_intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/aes_intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <2 x i64> @test_x86_aesni_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_x86_aesni_aesdec:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    aesdec %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xde,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX-LABEL: test_x86_aesni_aesdec:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xde,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -22,12 +22,12 @@ declare <2 x i64> @llvm.x86.aesni.aesdec
 
 define <2 x i64> @test_x86_aesni_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_x86_aesni_aesdeclast:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    aesdeclast %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xdf,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX-LABEL: test_x86_aesni_aesdeclast:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdf,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -38,12 +38,12 @@ declare <2 x i64> @llvm.x86.aesni.aesdec
 
 define <2 x i64> @test_x86_aesni_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_x86_aesni_aesenc:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    aesenc %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xdc,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX-LABEL: test_x86_aesni_aesenc:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdc,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -54,12 +54,12 @@ declare <2 x i64> @llvm.x86.aesni.aesenc
 
 define <2 x i64> @test_x86_aesni_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
 ; SSE-LABEL: test_x86_aesni_aesenclast:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    aesenclast %xmm1, %xmm0 # encoding: [0x66,0x0f,0x38,0xdd,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX-LABEL: test_x86_aesni_aesenclast:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdd,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -70,12 +70,12 @@ declare <2 x i64> @llvm.x86.aesni.aesenc
 
 define <2 x i64> @test_x86_aesni_aesimc(<2 x i64> %a0) {
 ; SSE-LABEL: test_x86_aesni_aesimc:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    aesimc %xmm0, %xmm0 # encoding: [0x66,0x0f,0x38,0xdb,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX-LABEL: test_x86_aesni_aesimc:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaesimc %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xdb,0xc0]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0) ; <<2 x i64>> [#uses=1]
@@ -86,12 +86,12 @@ declare <2 x i64> @llvm.x86.aesni.aesimc
 
 define <2 x i64> @test_x86_aesni_aeskeygenassist(<2 x i64> %a0) {
 ; SSE-LABEL: test_x86_aesni_aeskeygenassist:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    aeskeygenassist $7, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x3a,0xdf,0xc0,0x07]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX-LABEL: test_x86_aesni_aeskeygenassist:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0xdf,0xc0,0x07]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7) ; <<2 x i64>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/all-ones-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/all-ones-vector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/all-ones-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/all-ones-vector.ll Mon Dec  4 09:18:51 2017
@@ -12,22 +12,22 @@
 
 define <16 x i8> @allones_v16i8() nounwind {
 ; X32-SSE-LABEL: allones_v16i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: allones_v16i8:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v16i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: allones_v16i8:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -35,22 +35,22 @@ define <16 x i8> @allones_v16i8() nounwi
 
 define <8 x i16> @allones_v8i16() nounwind {
 ; X32-SSE-LABEL: allones_v8i16:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: allones_v8i16:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v8i16:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: allones_v8i16:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -58,22 +58,22 @@ define <8 x i16> @allones_v8i16() nounwi
 
 define <4 x i32> @allones_v4i32() nounwind {
 ; X32-SSE-LABEL: allones_v4i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: allones_v4i32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v4i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: allones_v4i32:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -81,22 +81,22 @@ define <4 x i32> @allones_v4i32() nounwi
 
 define <2 x i64> @allones_v2i64() nounwind {
 ; X32-SSE-LABEL: allones_v2i64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: allones_v2i64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v2i64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: allones_v2i64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   ret <2 x i64> <i64 -1, i64 -1>
@@ -104,22 +104,22 @@ define <2 x i64> @allones_v2i64() nounwi
 
 define <2 x double> @allones_v2f64() nounwind {
 ; X32-SSE-LABEL: allones_v2f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: allones_v2f64:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v2f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: allones_v2f64:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   ret <2 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -127,22 +127,22 @@ define <2 x double> @allones_v2f64() nou
 
 define <4 x float> @allones_v4f32() nounwind {
 ; X32-SSE-LABEL: allones_v4f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX-LABEL: allones_v4f32:
-; X32-AVX:       # BB#0:
+; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X32-AVX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v4f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: allones_v4f32:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   ret <4 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
@@ -150,36 +150,36 @@ define <4 x float> @allones_v4f32() noun
 
 define <32 x i8> @allones_v32i8() nounwind {
 ; X32-SSE-LABEL: allones_v32i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v32i8:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v32i8:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v32i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v32i8:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v32i8:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -187,36 +187,36 @@ define <32 x i8> @allones_v32i8() nounwi
 
 define <16 x i16> @allones_v16i16() nounwind {
 ; X32-SSE-LABEL: allones_v16i16:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v16i16:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v16i16:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v16i16:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v16i16:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v16i16:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -224,36 +224,36 @@ define <16 x i16> @allones_v16i16() noun
 
 define <8 x i32> @allones_v8i32() nounwind {
 ; X32-SSE-LABEL: allones_v8i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v8i32:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v8i32:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v8i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v8i32:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v8i32:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -261,36 +261,36 @@ define <8 x i32> @allones_v8i32() nounwi
 
 define <4 x i64> @allones_v4i64() nounwind {
 ; X32-SSE-LABEL: allones_v4i64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v4i64:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v4i64:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v4i64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v4i64:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v4i64:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -298,36 +298,36 @@ define <4 x i64> @allones_v4i64() nounwi
 
 define <4 x double> @allones_v4f64() nounwind {
 ; X32-SSE-LABEL: allones_v4f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v4f64:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v4f64:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v4f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v4f64:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v4f64:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <4 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -335,36 +335,36 @@ define <4 x double> @allones_v4f64() nou
 
 define <4 x double> @allones_v4f64_optsize() nounwind optsize {
 ; X32-SSE-LABEL: allones_v4f64_optsize:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v4f64_optsize:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v4f64_optsize:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v4f64_optsize:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v4f64_optsize:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v4f64_optsize:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <4 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -372,36 +372,36 @@ define <4 x double> @allones_v4f64_optsi
 
 define <8 x float> @allones_v8f32() nounwind {
 ; X32-SSE-LABEL: allones_v8f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v8f32:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v8f32:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v8f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v8f32:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v8f32:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <8 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
@@ -409,36 +409,36 @@ define <8 x float> @allones_v8f32() noun
 
 define <8 x float> @allones_v8f32_optsize() nounwind optsize {
 ; X32-SSE-LABEL: allones_v8f32_optsize:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v8f32_optsize:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX256-LABEL: allones_v8f32_optsize:
-; X32-AVX256:       # BB#0:
+; X32-AVX256:       # %bb.0:
 ; X32-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX256-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v8f32_optsize:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v8f32_optsize:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX256-LABEL: allones_v8f32_optsize:
-; X64-AVX256:       # BB#0:
+; X64-AVX256:       # %bb.0:
 ; X64-AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX256-NEXT:    retq
   ret <8 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
@@ -446,7 +446,7 @@ define <8 x float> @allones_v8f32_optsiz
 
 define <64 x i8> @allones_v64i8() nounwind {
 ; X32-SSE-LABEL: allones_v64i8:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -454,31 +454,31 @@ define <64 x i8> @allones_v64i8() nounwi
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v64i8:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: allones_v64i8:
-; X32-AVX2:       # BB#0:
+; X32-AVX2:       # %bb.0:
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-AVX2-NEXT:    retl
 ;
 ; X32-KNL-LABEL: allones_v64i8:
-; X32-KNL:       # BB#0:
+; X32-KNL:       # %bb.0:
 ; X32-KNL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-KNL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-KNL-NEXT:    retl
 ;
 ; X32-SKX-LABEL: allones_v64i8:
-; X32-SKX:       # BB#0:
+; X32-SKX:       # %bb.0:
 ; X32-SKX-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X32-SKX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v64i8:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -486,26 +486,26 @@ define <64 x i8> @allones_v64i8() nounwi
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v64i8:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: allones_v64i8:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-KNL-LABEL: allones_v64i8:
-; X64-KNL:       # BB#0:
+; X64-KNL:       # %bb.0:
 ; X64-KNL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-KNL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-KNL-NEXT:    retq
 ;
 ; X64-SKX-LABEL: allones_v64i8:
-; X64-SKX:       # BB#0:
+; X64-SKX:       # %bb.0:
 ; X64-SKX-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X64-SKX-NEXT:    retq
   ret <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -513,7 +513,7 @@ define <64 x i8> @allones_v64i8() nounwi
 
 define <32 x i16> @allones_v32i16() nounwind {
 ; X32-SSE-LABEL: allones_v32i16:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -521,31 +521,31 @@ define <32 x i16> @allones_v32i16() noun
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v32i16:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: allones_v32i16:
-; X32-AVX2:       # BB#0:
+; X32-AVX2:       # %bb.0:
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-AVX2-NEXT:    retl
 ;
 ; X32-KNL-LABEL: allones_v32i16:
-; X32-KNL:       # BB#0:
+; X32-KNL:       # %bb.0:
 ; X32-KNL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-KNL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-KNL-NEXT:    retl
 ;
 ; X32-SKX-LABEL: allones_v32i16:
-; X32-SKX:       # BB#0:
+; X32-SKX:       # %bb.0:
 ; X32-SKX-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X32-SKX-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v32i16:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -553,26 +553,26 @@ define <32 x i16> @allones_v32i16() noun
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v32i16:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: allones_v32i16:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-KNL-LABEL: allones_v32i16:
-; X64-KNL:       # BB#0:
+; X64-KNL:       # %bb.0:
 ; X64-KNL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-KNL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-KNL-NEXT:    retq
 ;
 ; X64-SKX-LABEL: allones_v32i16:
-; X64-SKX:       # BB#0:
+; X64-SKX:       # %bb.0:
 ; X64-SKX-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X64-SKX-NEXT:    retq
   ret <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -580,7 +580,7 @@ define <32 x i16> @allones_v32i16() noun
 
 define <16 x i32> @allones_v16i32() nounwind {
 ; X32-SSE-LABEL: allones_v16i32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -588,25 +588,25 @@ define <16 x i32> @allones_v16i32() noun
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v16i32:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: allones_v16i32:
-; X32-AVX2:       # BB#0:
+; X32-AVX2:       # %bb.0:
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-AVX2-NEXT:    retl
 ;
 ; X32-AVX512-LABEL: allones_v16i32:
-; X32-AVX512:       # BB#0:
+; X32-AVX512:       # %bb.0:
 ; X32-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v16i32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -614,20 +614,20 @@ define <16 x i32> @allones_v16i32() noun
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v16i32:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: allones_v16i32:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: allones_v16i32:
-; X64-AVX512:       # BB#0:
+; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    retq
   ret <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -635,7 +635,7 @@ define <16 x i32> @allones_v16i32() noun
 
 define <8 x i64> @allones_v8i64() nounwind {
 ; X32-SSE-LABEL: allones_v8i64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -643,25 +643,25 @@ define <8 x i64> @allones_v8i64() nounwi
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v8i64:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: allones_v8i64:
-; X32-AVX2:       # BB#0:
+; X32-AVX2:       # %bb.0:
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-AVX2-NEXT:    retl
 ;
 ; X32-AVX512-LABEL: allones_v8i64:
-; X32-AVX512:       # BB#0:
+; X32-AVX512:       # %bb.0:
 ; X32-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v8i64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -669,20 +669,20 @@ define <8 x i64> @allones_v8i64() nounwi
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v8i64:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: allones_v8i64:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: allones_v8i64:
-; X64-AVX512:       # BB#0:
+; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    retq
   ret <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
@@ -690,7 +690,7 @@ define <8 x i64> @allones_v8i64() nounwi
 
 define <8 x double> @allones_v8f64() nounwind {
 ; X32-SSE-LABEL: allones_v8f64:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -698,25 +698,25 @@ define <8 x double> @allones_v8f64() nou
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v8f64:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: allones_v8f64:
-; X32-AVX2:       # BB#0:
+; X32-AVX2:       # %bb.0:
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-AVX2-NEXT:    retl
 ;
 ; X32-AVX512-LABEL: allones_v8f64:
-; X32-AVX512:       # BB#0:
+; X32-AVX512:       # %bb.0:
 ; X32-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v8f64:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -724,20 +724,20 @@ define <8 x double> @allones_v8f64() nou
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v8f64:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: allones_v8f64:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: allones_v8f64:
-; X64-AVX512:       # BB#0:
+; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    retq
   ret <8 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff, double 0xffffffffffffffff>
@@ -745,7 +745,7 @@ define <8 x double> @allones_v8f64() nou
 
 define <16 x float> @allones_v16f32() nounwind {
 ; X32-SSE-LABEL: allones_v16f32:
-; X32-SSE:       # BB#0:
+; X32-SSE:       # %bb.0:
 ; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X32-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X32-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -753,25 +753,25 @@ define <16 x float> @allones_v16f32() no
 ; X32-SSE-NEXT:    retl
 ;
 ; X32-AVX1-LABEL: allones_v16f32:
-; X32-AVX1:       # BB#0:
+; X32-AVX1:       # %bb.0:
 ; X32-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: allones_v16f32:
-; X32-AVX2:       # BB#0:
+; X32-AVX2:       # %bb.0:
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X32-AVX2-NEXT:    retl
 ;
 ; X32-AVX512-LABEL: allones_v16f32:
-; X32-AVX512:       # BB#0:
+; X32-AVX512:       # %bb.0:
 ; X32-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X32-AVX512-NEXT:    retl
 ;
 ; X64-SSE-LABEL: allones_v16f32:
-; X64-SSE:       # BB#0:
+; X64-SSE:       # %bb.0:
 ; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm2
@@ -779,20 +779,20 @@ define <16 x float> @allones_v16f32() no
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: allones_v16f32:
-; X64-AVX1:       # BB#0:
+; X64-AVX1:       # %bb.0:
 ; X64-AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; X64-AVX1-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: allones_v16f32:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
 ; X64-AVX512-LABEL: allones_v16f32:
-; X64-AVX512:       # BB#0:
+; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; X64-AVX512-NEXT:    retq
   ret <16 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
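
[Note: autogenerated CHECK lines like the ones in this file are not edited by
hand; after a change to the printed block label they are regenerated with the
in-tree update script, which reruns llc over the test's RUN lines and rewrites
every CHECK/CHECK-NEXT block, picking up the new "# %bb.N:" comments
automatically. A sketch of a typical invocation — the exact paths and flags
here are illustrative, not taken from this commit:

  llvm/utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc \
      llvm/test/CodeGen/X86/all-ones-vector.ll ]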

Modified: llvm/trunk/test/CodeGen/X86/and-sink.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/and-sink.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/and-sink.ll (original)
+++ llvm/trunk/test/CodeGen/X86/and-sink.ll Mon Dec  4 09:18:51 2017
@@ -9,15 +9,15 @@
 ; Test that 'and' is sunk into bb0.
 define i32 @and_sink1(i32 %a, i1 %c) {
 ; CHECK-LABEL: and_sink1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    je .LBB0_3
-; CHECK-NEXT:  # BB#1: # %bb0
+; CHECK-NEXT:  # %bb.1: # %bb0
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl $0, A
 ; CHECK-NEXT:    testb $4, %al
 ; CHECK-NEXT:    jne .LBB0_3
-; CHECK-NEXT:  # BB#2: # %bb1
+; CHECK-NEXT:  # %bb.2: # %bb1
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB0_3: # %bb2
@@ -46,11 +46,11 @@ bb2:
 ; Test that both 'and' and cmp get sunk to bb1.
 define i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
 ; CHECK-LABEL: and_sink2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl $0, A
 ; CHECK-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    je .LBB1_5
-; CHECK-NEXT:  # BB#1: # %bb0.preheader
+; CHECK-NEXT:  # %bb.1: # %bb0.preheader
 ; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -59,12 +59,12 @@ define i32 @and_sink2(i32 %a, i1 %c, i1
 ; CHECK-NEXT:    movl $0, B
 ; CHECK-NEXT:    testb $1, %al
 ; CHECK-NEXT:    je .LBB1_5
-; CHECK-NEXT:  # BB#3: # %bb1
+; CHECK-NEXT:  # %bb.3: # %bb1
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movl $0, C
 ; CHECK-NEXT:    testb $4, %cl
 ; CHECK-NEXT:    jne .LBB1_2
-; CHECK-NEXT:  # BB#4: # %bb2
+; CHECK-NEXT:  # %bb.4: # %bb2
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    retl
 ; CHECK-NEXT:  .LBB1_5: # %bb3
@@ -100,10 +100,10 @@ bb3:
 ; Test that CodeGenPrepare doesn't get stuck in a loop sinking and hoisting a masked load.
 define i32 @and_sink3(i1 %c, i32* %p) {
 ; CHECK-LABEL: and_sink3:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    je .LBB2_3
-; CHECK-NEXT:  # BB#1: # %bb0
+; CHECK-NEXT:  # %bb.1: # %bb0
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl (%eax), %eax
 ; CHECK-NEXT:    testl %eax, %eax
@@ -138,16 +138,16 @@ bb2:
 ; Test that CodeGenPrepare sinks/duplicates non-immediate 'and'.
 define i32 @and_sink4(i32 %a, i32 %b, i1 %c) {
 ; CHECK-LABEL: and_sink4:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    je .LBB3_4
-; CHECK-NEXT:  # BB#1: # %bb0
+; CHECK-NEXT:  # %bb.1: # %bb0
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    testl %eax, %ecx
 ; CHECK-NEXT:    movl $0, A
 ; CHECK-NEXT:    jne .LBB3_4
-; CHECK-NEXT:  # BB#2: # %bb1
+; CHECK-NEXT:  # %bb.2: # %bb1
 ; CHECK-NEXT:    leal (%ecx,%eax), %edx
 ; CHECK-NEXT:    testl %eax, %ecx
 ; CHECK-NEXT:    movl %edx, B
@@ -189,15 +189,15 @@ bb3:
 ; when it would increase register pressure.
 define i32 @and_sink5(i32 %a, i32 %b, i32 %a2, i32 %b2, i1 %c) {
 ; CHECK-LABEL: and_sink5:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testb $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    je .LBB4_4
-; CHECK-NEXT:  # BB#1: # %bb0
+; CHECK-NEXT:  # %bb.1: # %bb0
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl $0, A
 ; CHECK-NEXT:    jne .LBB4_4
-; CHECK-NEXT:  # BB#2: # %bb1
+; CHECK-NEXT:  # %bb.2: # %bb1
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    testl %eax, %eax

Modified: llvm/trunk/test/CodeGen/X86/anyext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/anyext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/anyext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/anyext.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
 ; X32-LABEL: foo:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X32-NEXT:    divb {{[0-9]+}}(%esp)
@@ -15,7 +15,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) n
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: foo:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    # kill: %eax<def> %eax<kill> %ax<def>
 ; X64-NEXT:    divb %sil
@@ -31,7 +31,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) n
 
 define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
 ; X32-LABEL: bar:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:    divw {{[0-9]+}}(%esp)
@@ -40,7 +40,7 @@ define i32 @bar(i32 %p, i16 zeroext %x)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: bar:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    divw %si

Modified: llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll Mon Dec  4 09:18:51 2017
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
 ; RUN: llc < %s -mcpu=goldmont -mtriple=i686-linux | FileCheck %s
 
-; CHECK:BB#5
+; CHECK:%bb.5
 ; CHECK-NEXT:leal
 ; CHECK-NEXT:leal
 ; CHECK-NEXT:leal
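
[Note: hand-written checks such as the CHECK line above cannot be regenerated
by the update script, so the block-label string is rewritten textually. A
rough one-liner that performs the same substitution — illustrative only, and
it assumes "BB#" occurs in the test only as a block label:

  sed -i 's/BB#\([0-9][0-9]*\)/%bb.\1/g' test/CodeGen/X86/atom-fixup-lea2.ll ]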

Modified: llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll Mon Dec  4 09:18:51 2017
@@ -4,14 +4,14 @@
 
 define i32 @test_add_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
 ; FASTINCDEC-LABEL: test_add_1_cmov_slt:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock incq (%rdi)
 ; FASTINCDEC-NEXT:    cmovgl %edx, %esi
 ; FASTINCDEC-NEXT:    movl %esi, %eax
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_add_1_cmov_slt:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock addq $1, (%rdi)
 ; SLOWINCDEC-NEXT:    cmovgl %edx, %esi
 ; SLOWINCDEC-NEXT:    movl %esi, %eax
@@ -25,14 +25,14 @@ entry:
 
 define i32 @test_add_1_cmov_sge(i64* %p, i32 %a0, i32 %a1) #0 {
 ; FASTINCDEC-LABEL: test_add_1_cmov_sge:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock incq (%rdi)
 ; FASTINCDEC-NEXT:    cmovlel %edx, %esi
 ; FASTINCDEC-NEXT:    movl %esi, %eax
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_add_1_cmov_sge:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock addq $1, (%rdi)
 ; SLOWINCDEC-NEXT:    cmovlel %edx, %esi
 ; SLOWINCDEC-NEXT:    movl %esi, %eax
@@ -46,14 +46,14 @@ entry:
 
 define i32 @test_sub_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
 ; FASTINCDEC-LABEL: test_sub_1_cmov_sle:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock decq (%rdi)
 ; FASTINCDEC-NEXT:    cmovgel %edx, %esi
 ; FASTINCDEC-NEXT:    movl %esi, %eax
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_sub_1_cmov_sle:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock addq $-1, (%rdi)
 ; SLOWINCDEC-NEXT:    cmovgel %edx, %esi
 ; SLOWINCDEC-NEXT:    movl %esi, %eax
@@ -67,14 +67,14 @@ entry:
 
 define i32 @test_sub_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
 ; FASTINCDEC-LABEL: test_sub_1_cmov_sgt:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock decq (%rdi)
 ; FASTINCDEC-NEXT:    cmovll %edx, %esi
 ; FASTINCDEC-NEXT:    movl %esi, %eax
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_sub_1_cmov_sgt:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock addq $-1, (%rdi)
 ; SLOWINCDEC-NEXT:    cmovll %edx, %esi
 ; SLOWINCDEC-NEXT:    movl %esi, %eax
@@ -89,7 +89,7 @@ entry:
 ; FIXME: (setcc slt x, 0) gets combined into shr early.
 define i8 @test_add_1_setcc_slt(i64* %p) #0 {
 ; CHECK-LABEL: test_add_1_setcc_slt:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    shrq $63, %rax
@@ -104,13 +104,13 @@ entry:
 
 define i8 @test_sub_1_setcc_sgt(i64* %p) #0 {
 ; FASTINCDEC-LABEL: test_sub_1_setcc_sgt:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock decq (%rdi)
 ; FASTINCDEC-NEXT:    setge %al
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_sub_1_setcc_sgt:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock addq $-1, (%rdi)
 ; SLOWINCDEC-NEXT:    setge %al
 ; SLOWINCDEC-NEXT:    retq
@@ -123,10 +123,10 @@ entry:
 
 define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
 ; FASTINCDEC-LABEL: test_add_1_brcond_sge:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock incq (%rdi)
 ; FASTINCDEC-NEXT:    jle .LBB6_2
-; FASTINCDEC-NEXT:  # BB#1: # %t
+; FASTINCDEC-NEXT:  # %bb.1: # %t
 ; FASTINCDEC-NEXT:    movl %esi, %eax
 ; FASTINCDEC-NEXT:    retq
 ; FASTINCDEC-NEXT:  .LBB6_2: # %f
@@ -134,10 +134,10 @@ define i32 @test_add_1_brcond_sge(i64* %
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_add_1_brcond_sge:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock addq $1, (%rdi)
 ; SLOWINCDEC-NEXT:    jle .LBB6_2
-; SLOWINCDEC-NEXT:  # BB#1: # %t
+; SLOWINCDEC-NEXT:  # %bb.1: # %t
 ; SLOWINCDEC-NEXT:    movl %esi, %eax
 ; SLOWINCDEC-NEXT:    retq
 ; SLOWINCDEC-NEXT:  .LBB6_2: # %f
@@ -158,7 +158,7 @@ f:
 
 define i32 @test_add_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
 ; CHECK-LABEL: test_add_1_cmov_sle:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
@@ -174,7 +174,7 @@ entry:
 
 define i32 @test_add_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
 ; CHECK-LABEL: test_add_1_cmov_sgt:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
@@ -192,7 +192,7 @@ entry:
 
 define i8 @test_add_1_setcc_sgt_reuse(i64* %p, i64* %p2) #0 {
 ; CHECK-LABEL: test_add_1_setcc_sgt_reuse:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %ecx
 ; CHECK-NEXT:    lock xaddq %rcx, (%rdi)
 ; CHECK-NEXT:    testq %rcx, %rcx
@@ -209,7 +209,7 @@ entry:
 
 define i8 @test_sub_2_setcc_sgt(i64* %p) #0 {
 ; CHECK-LABEL: test_sub_2_setcc_sgt:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq $-2, %rax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq %rax, %rax
@@ -225,7 +225,7 @@ entry:
 define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
 ; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
 ; CHECK-LABEL: test_add_1_cmov_cmov:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    testq   %rax, %rax
@@ -240,13 +240,13 @@ entry:
 
 define i8 @test_sub_1_cmp_1_setcc_eq(i64* %p) #0 {
 ; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock decq (%rdi)
 ; FASTINCDEC-NEXT:    sete %al
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock subq $1, (%rdi)
 ; SLOWINCDEC-NEXT:    sete %al
 ; SLOWINCDEC-NEXT:    retq
@@ -259,13 +259,13 @@ entry:
 
 define i8 @test_sub_1_cmp_1_setcc_ne(i64* %p) #0 {
 ; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
-; FASTINCDEC:       # BB#0: # %entry
+; FASTINCDEC:       # %bb.0: # %entry
 ; FASTINCDEC-NEXT:    lock decq (%rdi)
 ; FASTINCDEC-NEXT:    setne %al
 ; FASTINCDEC-NEXT:    retq
 ;
 ; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
-; SLOWINCDEC:       # BB#0: # %entry
+; SLOWINCDEC:       # %bb.0: # %entry
 ; SLOWINCDEC-NEXT:    lock subq $1, (%rdi)
 ; SLOWINCDEC-NEXT:    setne %al
 ; SLOWINCDEC-NEXT:    retq
@@ -278,7 +278,7 @@ entry:
 
 define i8 @test_sub_1_cmp_1_setcc_ugt(i64* %p) #0 {
 ; CHECK-LABEL: test_sub_1_cmp_1_setcc_ugt:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lock subq $1, (%rdi)
 ; CHECK-NEXT:    seta %al
 ; CHECK-NEXT:    retq
@@ -293,7 +293,7 @@ entry:
 ; comparison can be folded into the atomic subtract.
 define i8 @test_sub_1_cmp_1_setcc_sle(i64* %p) #0 {
 ; CHECK-LABEL: test_sub_1_cmp_1_setcc_sle:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq $-1, %rax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    cmpq $2, %rax
@@ -308,7 +308,7 @@ entry:
 
 define i8 @test_sub_3_cmp_3_setcc_eq(i64* %p) #0 {
 ; CHECK-LABEL: test_sub_3_cmp_3_setcc_eq:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lock subq $3, (%rdi)
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    retq
@@ -323,7 +323,7 @@ entry:
 ; comparison can be folded into the atomic subtract.
 define i8 @test_sub_3_cmp_3_setcc_uge(i64* %p) #0 {
 ; CHECK-LABEL: test_sub_3_cmp_3_setcc_uge:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq $-3, %rax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
 ; CHECK-NEXT:    cmpq $2, %rax

Modified: llvm/trunk/test/CodeGen/X86/atomic-minmax-i6432.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-minmax-i6432.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-minmax-i6432.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-minmax-i6432.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define i64 @atomic_max_i64() nounwind {
 ; LINUX-LABEL: atomic_max_i64:
-; LINUX:       # BB#0: # %entry
+; LINUX:       # %bb.0: # %entry
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    pushl %esi
 ; LINUX-NEXT:    movl sc64+4, %edx
@@ -24,13 +24,13 @@ define i64 @atomic_max_i64() nounwind {
 ; LINUX-NEXT:    cmovll %eax, %ebx
 ; LINUX-NEXT:    lock cmpxchg8b sc64
 ; LINUX-NEXT:    jne .LBB0_1
-; LINUX-NEXT:  # BB#2: # %atomicrmw.end
+; LINUX-NEXT:  # %bb.2: # %atomicrmw.end
 ; LINUX-NEXT:    popl %esi
 ; LINUX-NEXT:    popl %ebx
 ; LINUX-NEXT:    retl
 ;
 ; PIC-LABEL: atomic_max_i64:
-; PIC:       ## BB#0: ## %entry
+; PIC:       ## %bb.0: ## %entry
 ; PIC-NEXT:    pushl %ebx
 ; PIC-NEXT:    pushl %edi
 ; PIC-NEXT:    pushl %esi
@@ -53,7 +53,7 @@ define i64 @atomic_max_i64() nounwind {
 ; PIC-NEXT:    cmovll %eax, %ebx
 ; PIC-NEXT:    lock cmpxchg8b (%esi)
 ; PIC-NEXT:    jne LBB0_1
-; PIC-NEXT:  ## BB#2: ## %atomicrmw.end
+; PIC-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; PIC-NEXT:    popl %esi
 ; PIC-NEXT:    popl %edi
 ; PIC-NEXT:    popl %ebx
@@ -66,7 +66,7 @@ entry:
 
 define i64 @atomic_min_i64() nounwind {
 ; LINUX-LABEL: atomic_min_i64:
-; LINUX:       # BB#0: # %entry
+; LINUX:       # %bb.0: # %entry
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    movl sc64+4, %edx
 ; LINUX-NEXT:    movl sc64, %eax
@@ -82,12 +82,12 @@ define i64 @atomic_min_i64() nounwind {
 ; LINUX-NEXT:    cmovll %eax, %ebx
 ; LINUX-NEXT:    lock cmpxchg8b sc64
 ; LINUX-NEXT:    jne .LBB1_1
-; LINUX-NEXT:  # BB#2: # %atomicrmw.end
+; LINUX-NEXT:  # %bb.2: # %atomicrmw.end
 ; LINUX-NEXT:    popl %ebx
 ; LINUX-NEXT:    retl
 ;
 ; PIC-LABEL: atomic_min_i64:
-; PIC:       ## BB#0: ## %entry
+; PIC:       ## %bb.0: ## %entry
 ; PIC-NEXT:    pushl %ebx
 ; PIC-NEXT:    pushl %esi
 ; PIC-NEXT:    calll L1$pb
@@ -108,7 +108,7 @@ define i64 @atomic_min_i64() nounwind {
 ; PIC-NEXT:    cmovll %eax, %ebx
 ; PIC-NEXT:    lock cmpxchg8b (%esi)
 ; PIC-NEXT:    jne LBB1_1
-; PIC-NEXT:  ## BB#2: ## %atomicrmw.end
+; PIC-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; PIC-NEXT:    popl %esi
 ; PIC-NEXT:    popl %ebx
 ; PIC-NEXT:    retl
@@ -120,7 +120,7 @@ entry:
 
 define i64 @atomic_umax_i64() nounwind {
 ; LINUX-LABEL: atomic_umax_i64:
-; LINUX:       # BB#0: # %entry
+; LINUX:       # %bb.0: # %entry
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    pushl %esi
 ; LINUX-NEXT:    movl sc64+4, %edx
@@ -138,13 +138,13 @@ define i64 @atomic_umax_i64() nounwind {
 ; LINUX-NEXT:    cmovbl %eax, %ebx
 ; LINUX-NEXT:    lock cmpxchg8b sc64
 ; LINUX-NEXT:    jne .LBB2_1
-; LINUX-NEXT:  # BB#2: # %atomicrmw.end
+; LINUX-NEXT:  # %bb.2: # %atomicrmw.end
 ; LINUX-NEXT:    popl %esi
 ; LINUX-NEXT:    popl %ebx
 ; LINUX-NEXT:    retl
 ;
 ; PIC-LABEL: atomic_umax_i64:
-; PIC:       ## BB#0: ## %entry
+; PIC:       ## %bb.0: ## %entry
 ; PIC-NEXT:    pushl %ebx
 ; PIC-NEXT:    pushl %edi
 ; PIC-NEXT:    pushl %esi
@@ -167,7 +167,7 @@ define i64 @atomic_umax_i64() nounwind {
 ; PIC-NEXT:    cmovbl %eax, %ebx
 ; PIC-NEXT:    lock cmpxchg8b (%esi)
 ; PIC-NEXT:    jne LBB2_1
-; PIC-NEXT:  ## BB#2: ## %atomicrmw.end
+; PIC-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; PIC-NEXT:    popl %esi
 ; PIC-NEXT:    popl %edi
 ; PIC-NEXT:    popl %ebx
@@ -180,7 +180,7 @@ entry:
 
 define i64 @atomic_umin_i64() nounwind {
 ; LINUX-LABEL: atomic_umin_i64:
-; LINUX:       # BB#0: # %entry
+; LINUX:       # %bb.0: # %entry
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    movl sc64+4, %edx
 ; LINUX-NEXT:    movl sc64, %eax
@@ -196,12 +196,12 @@ define i64 @atomic_umin_i64() nounwind {
 ; LINUX-NEXT:    cmovbl %eax, %ebx
 ; LINUX-NEXT:    lock cmpxchg8b sc64
 ; LINUX-NEXT:    jne .LBB3_1
-; LINUX-NEXT:  # BB#2: # %atomicrmw.end
+; LINUX-NEXT:  # %bb.2: # %atomicrmw.end
 ; LINUX-NEXT:    popl %ebx
 ; LINUX-NEXT:    retl
 ;
 ; PIC-LABEL: atomic_umin_i64:
-; PIC:       ## BB#0: ## %entry
+; PIC:       ## %bb.0: ## %entry
 ; PIC-NEXT:    pushl %ebx
 ; PIC-NEXT:    pushl %esi
 ; PIC-NEXT:    calll L3$pb
@@ -222,7 +222,7 @@ define i64 @atomic_umin_i64() nounwind {
 ; PIC-NEXT:    cmovbl %eax, %ebx
 ; PIC-NEXT:    lock cmpxchg8b (%esi)
 ; PIC-NEXT:    jne LBB3_1
-; PIC-NEXT:  ## BB#2: ## %atomicrmw.end
+; PIC-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; PIC-NEXT:    popl %esi
 ; PIC-NEXT:    popl %ebx
 ; PIC-NEXT:    retl
@@ -236,7 +236,7 @@ entry:
 
 define void @tf_bug(i8* %ptr) nounwind {
 ; LINUX-LABEL: tf_bug:
-; LINUX:       # BB#0: # %entry
+; LINUX:       # %bb.0: # %entry
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    pushl %esi
 ; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -251,7 +251,7 @@ define void @tf_bug(i8* %ptr) nounwind {
 ; LINUX-NEXT:    adcl $0, %ecx
 ; LINUX-NEXT:    lock cmpxchg8b id
 ; LINUX-NEXT:    jne .LBB4_1
-; LINUX-NEXT:  # BB#2: # %atomicrmw.end
+; LINUX-NEXT:  # %bb.2: # %atomicrmw.end
 ; LINUX-NEXT:    addl $1, %eax
 ; LINUX-NEXT:    adcl $0, %edx
 ; LINUX-NEXT:    movl %eax, (%esi)
@@ -261,7 +261,7 @@ define void @tf_bug(i8* %ptr) nounwind {
 ; LINUX-NEXT:    retl
 ;
 ; PIC-LABEL: tf_bug:
-; PIC:       ## BB#0: ## %entry
+; PIC:       ## %bb.0: ## %entry
 ; PIC-NEXT:    pushl %ebx
 ; PIC-NEXT:    pushl %edi
 ; PIC-NEXT:    pushl %esi
@@ -280,7 +280,7 @@ define void @tf_bug(i8* %ptr) nounwind {
 ; PIC-NEXT:    adcl $0, %ecx
 ; PIC-NEXT:    lock cmpxchg8b _id-L4$pb(%edi)
 ; PIC-NEXT:    jne LBB4_1
-; PIC-NEXT:  ## BB#2: ## %atomicrmw.end
+; PIC-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; PIC-NEXT:    addl $1, %eax
 ; PIC-NEXT:    adcl $0, %edx
 ; PIC-NEXT:    movl %eax, (%esi)

Modified: llvm/trunk/test/CodeGen/X86/atomic128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic128.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 ; register live-ranges, we end up with a useless copy.
 define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
 ; CHECK-LABEL: val_compare_and_swap:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -26,7 +26,7 @@ define i128 @val_compare_and_swap(i128*
 
 define void @fetch_and_nand(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_nand:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -44,7 +44,7 @@ define void @fetch_and_nand(i128* %p, i1
 ; CHECK-NEXT:    notq %rcx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB1_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -56,7 +56,7 @@ define void @fetch_and_nand(i128* %p, i1
 
 define void @fetch_and_or(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_or:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -72,7 +72,7 @@ define void @fetch_and_or(i128* %p, i128
 ; CHECK-NEXT:    orq %r8, %rcx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB2_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -84,7 +84,7 @@ define void @fetch_and_or(i128* %p, i128
 
 define void @fetch_and_add(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_add:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -100,7 +100,7 @@ define void @fetch_and_add(i128* %p, i12
 ; CHECK-NEXT:    adcq %r8, %rcx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB3_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -112,7 +112,7 @@ define void @fetch_and_add(i128* %p, i12
 
 define void @fetch_and_sub(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_sub:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -128,7 +128,7 @@ define void @fetch_and_sub(i128* %p, i12
 ; CHECK-NEXT:    sbbq %r8, %rcx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB4_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -140,7 +140,7 @@ define void @fetch_and_sub(i128* %p, i12
 
 define void @fetch_and_min(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_min:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -159,7 +159,7 @@ define void @fetch_and_min(i128* %p, i12
 ; CHECK-NEXT:    cmovgeq %rax, %rbx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB5_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -171,7 +171,7 @@ define void @fetch_and_min(i128* %p, i12
 
 define void @fetch_and_max(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_max:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -190,7 +190,7 @@ define void @fetch_and_max(i128* %p, i12
 ; CHECK-NEXT:    cmovgeq %rax, %rbx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB6_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -202,7 +202,7 @@ define void @fetch_and_max(i128* %p, i12
 
 define void @fetch_and_umin(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_umin:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -221,7 +221,7 @@ define void @fetch_and_umin(i128* %p, i1
 ; CHECK-NEXT:    cmovaeq %rax, %rbx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB7_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -233,7 +233,7 @@ define void @fetch_and_umin(i128* %p, i1
 
 define void @fetch_and_umax(i128* %p, i128 %bits) {
 ; CHECK-LABEL: fetch_and_umax:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -252,7 +252,7 @@ define void @fetch_and_umax(i128* %p, i1
 ; CHECK-NEXT:    cmovbq %rax, %rbx
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB8_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
 ; CHECK-NEXT:    movq %rdx, _var+{{.*}}(%rip)
 ; CHECK-NEXT:    popq %rbx
@@ -264,7 +264,7 @@ define void @fetch_and_umax(i128* %p, i1
 
 define i128 @atomic_load_seq_cst(i128* %p) {
 ; CHECK-LABEL: atomic_load_seq_cst:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -281,7 +281,7 @@ define i128 @atomic_load_seq_cst(i128* %
 
 define i128 @atomic_load_relaxed(i128* %p) {
 ; CHECK-LABEL: atomic_load_relaxed:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -298,7 +298,7 @@ define i128 @atomic_load_relaxed(i128* %
 
 define void @atomic_store_seq_cst(i128* %p, i128 %in) {
 ; CHECK-LABEL: atomic_store_seq_cst:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -311,7 +311,7 @@ define void @atomic_store_seq_cst(i128*
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB11_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
    store atomic i128 %in, i128* %p seq_cst, align 16
@@ -320,7 +320,7 @@ define void @atomic_store_seq_cst(i128*
 
 define void @atomic_store_release(i128* %p, i128 %in) {
 ; CHECK-LABEL: atomic_store_release:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -333,7 +333,7 @@ define void @atomic_store_release(i128*
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB12_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
    store atomic i128 %in, i128* %p release, align 16
@@ -342,7 +342,7 @@ define void @atomic_store_release(i128*
 
 define void @atomic_store_relaxed(i128* %p, i128 %in) {
 ; CHECK-LABEL: atomic_store_relaxed:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset %rbx, -16
@@ -355,7 +355,7 @@ define void @atomic_store_relaxed(i128*
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    jne LBB13_1
-; CHECK-NEXT:  ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
    store atomic i128 %in, i128* %p unordered, align 16

Modified: llvm/trunk/test/CodeGen/X86/avg-mask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avg-mask.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avg-mask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avg-mask.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <16 x i8> @avg_v16i8_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %src, i16 %mask) nounwind {
 ; AVX512F-LABEL: avg_v16i8_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpavgb %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -14,7 +14,7 @@ define <16 x i8> @avg_v16i8_mask(<16 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v16i8_mask:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgb %xmm1, %xmm0, %xmm2 {%k1}
 ; AVX512BWVL-NEXT:    vmovdqa %xmm2, %xmm0
@@ -32,7 +32,7 @@ define <16 x i8> @avg_v16i8_mask(<16 x i
 
 define <16 x i8> @avg_v16i8_maskz(<16 x i8> %a, <16 x i8> %b, i16 %mask) nounwind {
 ; AVX512F-LABEL: avg_v16i8_maskz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpavgb %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -42,7 +42,7 @@ define <16 x i8> @avg_v16i8_maskz(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v16i8_maskz:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgb %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; AVX512BWVL-NEXT:    retq
@@ -59,7 +59,7 @@ define <16 x i8> @avg_v16i8_maskz(<16 x
 
 define <32 x i8> @avg_v32i8_mask(<32 x i8> %a, <32 x i8> %b, <32 x i8> %src, i32 %mask) nounwind {
 ; AVX512F-LABEL: avg_v32i8_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-32, %rsp
@@ -79,7 +79,7 @@ define <32 x i8> @avg_v32i8_mask(<32 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v32i8_mask:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgb %ymm1, %ymm0, %ymm2 {%k1}
 ; AVX512BWVL-NEXT:    vmovdqa %ymm2, %ymm0
@@ -97,7 +97,7 @@ define <32 x i8> @avg_v32i8_mask(<32 x i
 
 define <32 x i8> @avg_v32i8_maskz(<32 x i8> %a, <32 x i8> %b, i32 %mask) nounwind {
 ; AVX512F-LABEL: avg_v32i8_maskz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-32, %rsp
@@ -117,7 +117,7 @@ define <32 x i8> @avg_v32i8_maskz(<32 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v32i8_maskz:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgb %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; AVX512BWVL-NEXT:    retq
@@ -134,7 +134,7 @@ define <32 x i8> @avg_v32i8_maskz(<32 x
 
 define <64 x i8> @avg_v64i8_mask(<64 x i8> %a, <64 x i8> %b, <64 x i8> %src, i64 %mask) nounwind {
 ; AVX512F-LABEL: avg_v64i8_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-32, %rsp
@@ -174,7 +174,7 @@ define <64 x i8> @avg_v64i8_mask(<64 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v64i8_mask:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovq %rdi, %k1
 ; AVX512BWVL-NEXT:    vpavgb %zmm1, %zmm0, %zmm2 {%k1}
 ; AVX512BWVL-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -192,7 +192,7 @@ define <64 x i8> @avg_v64i8_mask(<64 x i
 
 define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind {
 ; AVX512F-LABEL: avg_v64i8_maskz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-32, %rsp
@@ -232,7 +232,7 @@ define <64 x i8> @avg_v64i8_maskz(<64 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v64i8_maskz:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovq %rdi, %k1
 ; AVX512BWVL-NEXT:    vpavgb %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BWVL-NEXT:    retq
@@ -249,7 +249,7 @@ define <64 x i8> @avg_v64i8_maskz(<64 x
 
 define <8 x i16> @avg_v8i16_mask(<8 x i16> %a, <8 x i16> %b, <8 x i16> %src, i8 %mask) nounwind {
 ; AVX512F-LABEL: avg_v8i16_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -259,7 +259,7 @@ define <8 x i16> @avg_v8i16_mask(<8 x i1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v8i16_mask:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgw %xmm1, %xmm0, %xmm2 {%k1}
 ; AVX512BWVL-NEXT:    vmovdqa %xmm2, %xmm0
@@ -277,7 +277,7 @@ define <8 x i16> @avg_v8i16_mask(<8 x i1
 
 define <8 x i16> @avg_v8i16_maskz(<8 x i16> %a, <8 x i16> %b, i8 %mask) nounwind {
 ; AVX512F-LABEL: avg_v8i16_maskz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -287,7 +287,7 @@ define <8 x i16> @avg_v8i16_maskz(<8 x i
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v8i16_maskz:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgw %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; AVX512BWVL-NEXT:    retq
@@ -304,7 +304,7 @@ define <8 x i16> @avg_v8i16_maskz(<8 x i
 
 define <16 x i16> @avg_v16i16_mask(<16 x i16> %a, <16 x i16> %b, <16 x i16> %src, i16 %mask) nounwind {
 ; AVX512F-LABEL: avg_v16i16_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpavgw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -313,7 +313,7 @@ define <16 x i16> @avg_v16i16_mask(<16 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v16i16_mask:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgw %ymm1, %ymm0, %ymm2 {%k1}
 ; AVX512BWVL-NEXT:    vmovdqa %ymm2, %ymm0
@@ -331,7 +331,7 @@ define <16 x i16> @avg_v16i16_mask(<16 x
 
 define <16 x i16> @avg_v16i16_maskz(<16 x i16> %a, <16 x i16> %b, i16 %mask) nounwind {
 ; AVX512F-LABEL: avg_v16i16_maskz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpavgw %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -340,7 +340,7 @@ define <16 x i16> @avg_v16i16_maskz(<16
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v16i16_maskz:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgw %ymm1, %ymm0, %ymm0 {%k1} {z}
 ; AVX512BWVL-NEXT:    retq
@@ -357,7 +357,7 @@ define <16 x i16> @avg_v16i16_maskz(<16
 
 define <32 x i16> @avg_v32i16_mask(<32 x i16> %a, <32 x i16> %b, <32 x i16> %src, i32 %mask) nounwind {
 ; AVX512F-LABEL: avg_v32i16_mask:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-32, %rsp
@@ -384,7 +384,7 @@ define <32 x i16> @avg_v32i16_mask(<32 x
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v32i16_mask:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgw %zmm1, %zmm0, %zmm2 {%k1}
 ; AVX512BWVL-NEXT:    vmovdqa64 %zmm2, %zmm0
@@ -402,7 +402,7 @@ define <32 x i16> @avg_v32i16_mask(<32 x
 
 define <32 x i16> @avg_v32i16_maskz(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind {
 ; AVX512F-LABEL: avg_v32i16_maskz:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    pushq %rbp
 ; AVX512F-NEXT:    movq %rsp, %rbp
 ; AVX512F-NEXT:    andq $-32, %rsp
@@ -429,7 +429,7 @@ define <32 x i16> @avg_v32i16_maskz(<32
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: avg_v32i16_maskz:
-; AVX512BWVL:       # BB#0:
+; AVX512BWVL:       # %bb.0:
 ; AVX512BWVL-NEXT:    kmovd %edi, %k1
 ; AVX512BWVL-NEXT:    vpavgw %zmm1, %zmm0, %zmm0 {%k1} {z}
 ; AVX512BWVL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avg.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v4i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    pavgb %xmm0, %xmm1
@@ -15,7 +15,7 @@ define void @avg_v4i8(<4 x i8>* %a, <4 x
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v4i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vpavgb %xmm0, %xmm1, %xmm0
@@ -35,7 +35,7 @@ define void @avg_v4i8(<4 x i8>* %a, <4 x
 
 define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v8i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pavgb %xmm0, %xmm1
@@ -43,7 +43,7 @@ define void @avg_v8i8(<8 x i8>* %a, <8 x
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v8i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vpavgb %xmm0, %xmm1, %xmm0
@@ -63,14 +63,14 @@ define void @avg_v8i8(<8 x i8>* %a, <8 x
 
 define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v16i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rsi), %xmm0
 ; SSE2-NEXT:    pavgb (%rdi), %xmm0
 ; SSE2-NEXT:    movdqu %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v16i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX-NEXT:    vpavgb (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
@@ -89,7 +89,7 @@ define void @avg_v16i8(<16 x i8>* %a, <1
 
 define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v32i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm3
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm8
 ; SSE2-NEXT:    movdqa (%rsi), %xmm0
@@ -176,7 +176,7 @@ define void @avg_v32i8(<32 x i8>* %a, <3
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -239,7 +239,7 @@ define void @avg_v32i8(<32 x i8>* %a, <3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX2-NEXT:    vpavgb (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rax)
@@ -247,7 +247,7 @@ define void @avg_v32i8(<32 x i8>* %a, <3
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v32i8:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512-NEXT:    vpavgb (%rdi), %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, (%rax)
@@ -267,7 +267,7 @@ define void @avg_v32i8(<32 x i8>* %a, <3
 
 define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v64i8:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm6
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm2
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm1
@@ -448,7 +448,7 @@ define void @avg_v64i8(<64 x i8>* %a, <6
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v64i8:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    subq $24, %rsp
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -593,7 +593,7 @@ define void @avg_v64i8(<64 x i8>* %a, <6
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v64i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -673,7 +673,7 @@ define void @avg_v64i8(<64 x i8>* %a, <6
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v64i8:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -707,7 +707,7 @@ define void @avg_v64i8(<64 x i8>* %a, <6
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v64i8:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm0
 ; AVX512BW-NEXT:    vpavgb (%rdi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu32 %zmm0, (%rax)
@@ -727,7 +727,7 @@ define void @avg_v64i8(<64 x i8>* %a, <6
 
 define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v4i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pavgw %xmm0, %xmm1
@@ -735,7 +735,7 @@ define void @avg_v4i16(<4 x i16>* %a, <4
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v4i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vpavgw %xmm0, %xmm1, %xmm0
@@ -755,14 +755,14 @@ define void @avg_v4i16(<4 x i16>* %a, <4
 
 define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v8i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rsi), %xmm0
 ; SSE2-NEXT:    pavgw (%rdi), %xmm0
 ; SSE2-NEXT:    movdqu %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v8i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX-NEXT:    vpavgw (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
@@ -781,7 +781,7 @@ define void @avg_v8i16(<8 x i16>* %a, <8
 
 define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v16i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm2
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm4
 ; SSE2-NEXT:    movdqa (%rsi), %xmm0
@@ -827,7 +827,7 @@ define void @avg_v16i16(<16 x i16>* %a,
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v16i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -862,7 +862,7 @@ define void @avg_v16i16(<16 x i16>* %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v16i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX2-NEXT:    vpavgw (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rax)
@@ -870,7 +870,7 @@ define void @avg_v16i16(<16 x i16>* %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v16i16:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512-NEXT:    vpavgw (%rdi), %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, (%rax)
@@ -890,7 +890,7 @@ define void @avg_v16i16(<16 x i16>* %a,
 
 define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v32i16:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm4
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm11
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm10
@@ -976,7 +976,7 @@ define void @avg_v32i16(<32 x i16>* %a,
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i16:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1039,7 +1039,7 @@ define void @avg_v32i16(<32 x i16>* %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1078,7 +1078,7 @@ define void @avg_v32i16(<32 x i16>* %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v32i16:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -1096,7 +1096,7 @@ define void @avg_v32i16(<32 x i16>* %a,
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v32i16:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm0
 ; AVX512BW-NEXT:    vpavgw (%rdi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu32 %zmm0, (%rax)
@@ -1116,7 +1116,7 @@ define void @avg_v32i16(<32 x i16>* %a,
 
 define void @avg_v4i8_2(<4 x i8>* %a, <4 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v4i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    pavgb %xmm0, %xmm1
@@ -1124,7 +1124,7 @@ define void @avg_v4i8_2(<4 x i8>* %a, <4
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v4i8_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vpavgb %xmm1, %xmm0, %xmm0
@@ -1144,7 +1144,7 @@ define void @avg_v4i8_2(<4 x i8>* %a, <4
 
 define void @avg_v8i8_2(<8 x i8>* %a, <8 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v8i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pavgb %xmm0, %xmm1
@@ -1152,7 +1152,7 @@ define void @avg_v8i8_2(<8 x i8>* %a, <8
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v8i8_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vpavgb %xmm1, %xmm0, %xmm0
@@ -1172,14 +1172,14 @@ define void @avg_v8i8_2(<8 x i8>* %a, <8
 
 define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v16i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pavgb (%rsi), %xmm0
 ; SSE2-NEXT:    movdqu %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v16i8_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpavgb (%rsi), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1198,7 +1198,7 @@ define void @avg_v16i8_2(<16 x i8>* %a,
 
 define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v32i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm3
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm8
 ; SSE2-NEXT:    movdqa (%rsi), %xmm0
@@ -1285,7 +1285,7 @@ define void @avg_v32i8_2(<32 x i8>* %a,
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i8_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -1348,7 +1348,7 @@ define void @avg_v32i8_2(<32 x i8>* %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i8_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpavgb (%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rax)
@@ -1356,7 +1356,7 @@ define void @avg_v32i8_2(<32 x i8>* %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v32i8_2:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vpavgb (%rsi), %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, (%rax)
@@ -1376,7 +1376,7 @@ define void @avg_v32i8_2(<32 x i8>* %a,
 
 define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
 ; SSE2-LABEL: avg_v64i8_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rsi), %xmm14
 ; SSE2-NEXT:    movdqa 16(%rsi), %xmm12
 ; SSE2-NEXT:    movdqa 32(%rsi), %xmm2
@@ -1510,7 +1510,7 @@ define void @avg_v64i8_2(<64 x i8>* %a,
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v64i8_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -1627,7 +1627,7 @@ define void @avg_v64i8_2(<64 x i8>* %a,
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v64i8_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1699,7 +1699,7 @@ define void @avg_v64i8_2(<64 x i8>* %a,
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v64i8_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -1729,7 +1729,7 @@ define void @avg_v64i8_2(<64 x i8>* %a,
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v64i8_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm0
 ; AVX512BW-NEXT:    vpavgb %zmm0, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu32 %zmm0, (%rax)
@@ -1750,7 +1750,7 @@ define void @avg_v64i8_2(<64 x i8>* %a,
 
 define void @avg_v4i16_2(<4 x i16>* %a, <4 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v4i16_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT:    pavgw %xmm0, %xmm1
@@ -1758,7 +1758,7 @@ define void @avg_v4i16_2(<4 x i16>* %a,
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v4i16_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
@@ -1778,14 +1778,14 @@ define void @avg_v4i16_2(<4 x i16>* %a,
 
 define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v8i16_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pavgw (%rsi), %xmm0
 ; SSE2-NEXT:    movdqu %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v8i16_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpavgw (%rsi), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
@@ -1804,7 +1804,7 @@ define void @avg_v8i16_2(<8 x i16>* %a,
 
 define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v16i16_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm2
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm4
 ; SSE2-NEXT:    movdqa (%rsi), %xmm0
@@ -1850,7 +1850,7 @@ define void @avg_v16i16_2(<16 x i16>* %a
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v16i16_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1885,7 +1885,7 @@ define void @avg_v16i16_2(<16 x i16>* %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v16i16_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpavgw (%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rax)
@@ -1893,7 +1893,7 @@ define void @avg_v16i16_2(<16 x i16>* %a
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v16i16_2:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vpavgw (%rsi), %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, (%rax)
@@ -1913,7 +1913,7 @@ define void @avg_v16i16_2(<16 x i16>* %a
 
 define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
 ; SSE2-LABEL: avg_v32i16_2:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm4
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm11
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm10
@@ -1999,7 +1999,7 @@ define void @avg_v32i16_2(<32 x i16>* %a
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i16_2:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -2062,7 +2062,7 @@ define void @avg_v32i16_2(<32 x i16>* %a
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i16_2:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -2101,7 +2101,7 @@ define void @avg_v32i16_2(<32 x i16>* %a
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v32i16_2:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -2119,7 +2119,7 @@ define void @avg_v32i16_2(<32 x i16>* %a
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v32i16_2:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vpavgw (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu32 %zmm0, (%rax)
@@ -2139,14 +2139,14 @@ define void @avg_v32i16_2(<32 x i16>* %a
 
 define void @avg_v4i8_const(<4 x i8>* %a) nounwind {
 ; SSE2-LABEL: avg_v4i8_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    pavgb {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    movd %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v4i8_const:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vpavgb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, (%rax)
@@ -2162,14 +2162,14 @@ define void @avg_v4i8_const(<4 x i8>* %a
 
 define void @avg_v8i8_const(<8 x i8>* %a) nounwind {
 ; SSE2-LABEL: avg_v8i8_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    pavgb {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    movq %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v8i8_const:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vpavgb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vmovq %xmm0, (%rax)
@@ -2185,14 +2185,14 @@ define void @avg_v8i8_const(<8 x i8>* %a
 
 define void @avg_v16i8_const(<16 x i8>* %a) nounwind {
 ; SSE2-LABEL: avg_v16i8_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pavgb {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    movdqu %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v16i8_const:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpavgb {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
@@ -2208,7 +2208,7 @@ define void @avg_v16i8_const(<16 x i8>*
 
 define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
 ; SSE2-LABEL: avg_v32i8_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm3
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
@@ -2259,7 +2259,7 @@ define void @avg_v32i8_const(<32 x i8>*
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i8_const:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -2298,7 +2298,7 @@ define void @avg_v32i8_const(<32 x i8>*
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i8_const:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpavgb {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rax)
@@ -2306,7 +2306,7 @@ define void @avg_v32i8_const(<32 x i8>*
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v32i8_const:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vpavgb {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, (%rax)
@@ -2323,7 +2323,7 @@ define void @avg_v32i8_const(<32 x i8>*
 
 define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
 ; SSE2-LABEL: avg_v64i8_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm5
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm6
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm15
@@ -2442,7 +2442,7 @@ define void @avg_v64i8_const(<64 x i8>*
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v64i8_const:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; AVX1-NEXT:    vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
@@ -2535,7 +2535,7 @@ define void @avg_v64i8_const(<64 x i8>*
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v64i8_const:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -2589,7 +2589,7 @@ define void @avg_v64i8_const(<64 x i8>*
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v64i8_const:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -2616,7 +2616,7 @@ define void @avg_v64i8_const(<64 x i8>*
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v64i8_const:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vpavgb {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu32 %zmm0, (%rax)
@@ -2633,14 +2633,14 @@ define void @avg_v64i8_const(<64 x i8>*
 
 define void @avg_v4i16_const(<4 x i16>* %a) nounwind {
 ; SSE2-LABEL: avg_v4i16_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    pavgw {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    movq %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v4i16_const:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vpavgw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vmovq %xmm0, (%rax)
@@ -2656,14 +2656,14 @@ define void @avg_v4i16_const(<4 x i16>*
 
 define void @avg_v8i16_const(<8 x i16>* %a) nounwind {
 ; SSE2-LABEL: avg_v8i16_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
 ; SSE2-NEXT:    pavgw {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    movdqu %xmm0, (%rax)
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v8i16_const:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpavgw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
@@ -2679,7 +2679,7 @@ define void @avg_v8i16_const(<8 x i16>*
 
 define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
 ; SSE2-LABEL: avg_v16i16_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm3
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm0
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
@@ -2714,7 +2714,7 @@ define void @avg_v16i16_const(<16 x i16>
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v16i16_const:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -2737,7 +2737,7 @@ define void @avg_v16i16_const(<16 x i16>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v16i16_const:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpavgw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqu %ymm0, (%rax)
@@ -2745,7 +2745,7 @@ define void @avg_v16i16_const(<16 x i16>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v16i16_const:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512-NEXT:    vpavgw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, (%rax)
@@ -2762,7 +2762,7 @@ define void @avg_v16i16_const(<16 x i16>
 
 define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
 ; SSE2-LABEL: avg_v32i16_const:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm7
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm6
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm4
@@ -2825,7 +2825,7 @@ define void @avg_v32i16_const(<32 x i16>
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i16_const:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -2864,7 +2864,7 @@ define void @avg_v32i16_const(<32 x i16>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i16_const:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -2894,7 +2894,7 @@ define void @avg_v32i16_const(<32 x i16>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v32i16_const:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8]
@@ -2909,7 +2909,7 @@ define void @avg_v32i16_const(<32 x i16>
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v32i16_const:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vpavgw {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu32 %zmm0, (%rax)
@@ -2926,12 +2926,12 @@ define void @avg_v32i16_const(<32 x i16>
 
 define <16 x i8> @avg_v16i8_3(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE2-LABEL: avg_v16i8_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pavgb %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: avg_v16i8_3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpavgb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %za = zext <16 x i8> %a to <16 x i16>
@@ -2945,7 +2945,7 @@ define <16 x i8> @avg_v16i8_3(<16 x i8>
 
 define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; SSE2-LABEL: avg_v32i8_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm5, %xmm5
 ; SSE2-NEXT:    movdqa %xmm0, %xmm6
 ; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
@@ -2982,7 +2982,7 @@ define <32 x i8> @avg_v32i8_3(<32 x i8>
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v32i8_3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
@@ -3021,12 +3021,12 @@ define <32 x i8> @avg_v32i8_3(<32 x i8>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v32i8_3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpavgb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: avg_v32i8_3:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpavgb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %za = zext <32 x i8> %a to <32 x i16>
@@ -3040,7 +3040,7 @@ define <32 x i8> @avg_v32i8_3(<32 x i8>
 
 define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
 ; SSE2-LABEL: avg_v64i8_3:
-; SSE2:       # BB#0:
+; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pxor %xmm9, %xmm9
 ; SSE2-NEXT:    movdqa %xmm0, %xmm10
 ; SSE2-NEXT:    punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
@@ -3107,7 +3107,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8>
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: avg_v64i8_3:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
@@ -3179,7 +3179,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8>
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: avg_v64i8_3:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -3227,7 +3227,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8>
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: avg_v64i8_3:
-; AVX512F:       # BB#0:
+; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm5
 ; AVX512F-NEXT:    vextracti128 $1, %ymm3, %xmm6
@@ -3241,7 +3241,7 @@ define <64 x i8> @avg_v64i8_3(<64 x i8>
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: avg_v64i8_3:
-; AVX512BW:       # BB#0:
+; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vpavgb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
   %za = zext <64 x i8> %a to <64 x i16>

Modified: llvm/trunk/test/CodeGen/X86/avx-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-arith.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-arith.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x double> @addpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: addpd256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -13,7 +13,7 @@ entry:
 
 define <4 x double> @addpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: addpd256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -23,7 +23,7 @@ entry:
 
 define <8 x float> @addps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: addps256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vaddps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -33,7 +33,7 @@ entry:
 
 define <8 x float> @addps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: addps256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vaddps {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -43,7 +43,7 @@ entry:
 
 define <4 x double> @subpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: subpd256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vsubpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -53,7 +53,7 @@ entry:
 
 define <4 x double> @subpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: subpd256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vsubpd (%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -64,7 +64,7 @@ entry:
 
 define <8 x float> @subps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: subps256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vsubps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -74,7 +74,7 @@ entry:
 
 define <8 x float> @subps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: subps256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vsubps (%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -85,7 +85,7 @@ entry:
 
 define <4 x double> @mulpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: mulpd256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmulpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -95,7 +95,7 @@ entry:
 
 define <4 x double> @mulpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: mulpd256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmulpd {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -105,7 +105,7 @@ entry:
 
 define <8 x float> @mulps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: mulps256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmulps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -115,7 +115,7 @@ entry:
 
 define <8 x float> @mulps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: mulps256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -125,7 +125,7 @@ entry:
 
 define <4 x double> @divpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: divpd256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vdivpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -135,7 +135,7 @@ entry:
 
 define <4 x double> @divpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: divpd256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vdivpd {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -145,7 +145,7 @@ entry:
 
 define <8 x float> @divps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: divps256:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vdivps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -155,7 +155,7 @@ entry:
 
 define <8 x float> @divps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: divps256fold:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vdivps {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -165,7 +165,7 @@ entry:
 
 define float @sqrtA(float %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: sqrtA:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -177,7 +177,7 @@ declare double @sqrt(double) readnone
 
 define double @sqrtB(double %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: sqrtB:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -190,7 +190,7 @@ declare float @sqrtf(float) readnone
 
 define <4 x i64> @vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
 ; CHECK-LABEL: vpaddq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
@@ -203,7 +203,7 @@ define <4 x i64> @vpaddq(<4 x i64> %i, <
 
 define <8 x i32> @vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
 ; CHECK-LABEL: vpaddd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
@@ -216,7 +216,7 @@ define <8 x i32> @vpaddd(<8 x i32> %i, <
 
 define <16 x i16> @vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
 ; CHECK-LABEL: vpaddw:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpaddw %xmm2, %xmm3, %xmm2
@@ -229,7 +229,7 @@ define <16 x i16> @vpaddw(<16 x i16> %i,
 
 define <32 x i8> @vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
 ; CHECK-LABEL: vpaddb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
@@ -242,7 +242,7 @@ define <32 x i8> @vpaddb(<32 x i8> %i, <
 
 define <4 x i64> @vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
 ; CHECK-LABEL: vpsubq:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
@@ -255,7 +255,7 @@ define <4 x i64> @vpsubq(<4 x i64> %i, <
 
 define <8 x i32> @vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
 ; CHECK-LABEL: vpsubd:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
@@ -268,7 +268,7 @@ define <8 x i32> @vpsubd(<8 x i32> %i, <
 
 define <16 x i16> @vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
 ; CHECK-LABEL: vpsubw:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpsubw %xmm2, %xmm3, %xmm2
@@ -281,7 +281,7 @@ define <16 x i16> @vpsubw(<16 x i16> %i,
 
 define <32 x i8> @vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
 ; CHECK-LABEL: vpsubb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpsubb %xmm2, %xmm3, %xmm2
@@ -294,7 +294,7 @@ define <32 x i8> @vpsubb(<32 x i8> %i, <
 
 define <8 x i32> @vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
 ; CHECK-LABEL: vpmulld:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpmulld %xmm2, %xmm3, %xmm2
@@ -307,7 +307,7 @@ define <8 x i32> @vpmulld(<8 x i32> %i,
 
 define <16 x i16> @vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
 ; CHECK-LABEL: vpmullw:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpmullw %xmm2, %xmm3, %xmm2
@@ -320,7 +320,7 @@ define <16 x i16> @vpmullw(<16 x i16> %i
 
 define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
 ; CHECK-LABEL: mul_v4i64:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpsrlq $32, %xmm3, %xmm4
@@ -349,7 +349,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.s
 
 define <4 x float> @int_sqrt_ss() {
 ; CHECK-LABEL: int_sqrt_ss:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
@@ -361,7 +361,7 @@ define <4 x float> @int_sqrt_ss() {
 
 define <2 x double> @vector_sqrt_scalar_load(double* %a0) optsize {
 ; CHECK-LABEL: vector_sqrt_scalar_load:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vsqrtpd %xmm0, %xmm0
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-basic.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-basic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-basic.ll Mon Dec  4 09:18:51 2017
@@ -7,7 +7,7 @@
 
 define void @zero128() nounwind ssp {
 ; CHECK-LABEL: zero128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    movq _z@{{.*}}(%rip), %rax
 ; CHECK-NEXT:    vmovaps %xmm0, (%rax)
@@ -18,7 +18,7 @@ define void @zero128() nounwind ssp {
 
 define void @zero256() nounwind ssp {
 ; CHECK-LABEL: zero256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq _x@{{.*}}(%rip), %rax
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovaps %ymm0, (%rax)
@@ -33,7 +33,7 @@ define void @zero256() nounwind ssp {
 
 define void @ones([0 x float]* nocapture %RET, [0 x float]* nocapture %aFOO) nounwind {
 ; CHECK-LABEL: ones:
-; CHECK:       ## BB#0: ## %allocas
+; CHECK:       ## %bb.0: ## %allocas
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
@@ -50,7 +50,7 @@ float>* %ptr2vec615, align 32
 
 define void @ones2([0 x i32]* nocapture %RET, [0 x i32]* nocapture %aFOO) nounwind {
 ; CHECK-LABEL: ones2:
-; CHECK:       ## BB#0: ## %allocas
+; CHECK:       ## %bb.0: ## %allocas
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vcmptrueps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
@@ -65,7 +65,7 @@ allocas:
 ;;; Just make sure this doesn't crash
 define <4 x i64> @ISelCrash(<4 x i64> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: ISelCrash:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
@@ -75,7 +75,7 @@ define <4 x i64> @ISelCrash(<4 x i64> %a
 ;;; Don't crash on movd
 define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind {
 ; CHECK-LABEL: VMOVZQI2PQI:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
 ; CHECK-NEXT:    retq
@@ -92,7 +92,7 @@ define <8 x i32> @VMOVZQI2PQI([0 x float
 ; rdar://10566486
 define <16 x float> @fneg(<16 x float> %a) nounwind {
 ; CHECK-LABEL: fneg:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps {{.*#+}} ymm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
 ; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vxorps %ymm2, %ymm1, %ymm1
@@ -104,7 +104,7 @@ define <16 x float> @fneg(<16 x float> %
 ;;; Don't crash on build vector
 define <16 x i16> @build_vec_16x16(i16 %a) nounwind readonly {
 ; CHECK-LABEL: build_vec_16x16:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movzwl %di, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm0
 ; CHECK-NEXT:    retq
@@ -116,7 +116,7 @@ define <16 x i16> @build_vec_16x16(i16 %
 ;;; an incorrect mnemonic of "movd" was printed for this instruction.
 define i64 @VMOVPQIto64rr(<2 x i64> %a) {
 ; CHECK-LABEL: VMOVPQIto64rr:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovq %xmm0, %rax
 ; CHECK-NEXT:    retq
   %vecext.i = extractelement <2 x i64> %a, i32 0
@@ -126,7 +126,7 @@ define i64 @VMOVPQIto64rr(<2 x i64> %a)
 ; PR22685
 define <8 x float> @mov00_8f32(float* %ptr) {
 ; CHECK-LABEL: mov00_8f32:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq
   %val = load float, float* %ptr

Modified: llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-bitcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-bitcast.ll Mon Dec  4 09:18:51 2017
@@ -2,7 +2,7 @@
 
 define i64 @bitcasti64tof64() {
 ; CHECK-LABEL: bitcasti64tof64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK:         vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vmovq %xmm0, %rax
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cast.ll Mon Dec  4 09:18:51 2017
@@ -8,7 +8,7 @@
 
 define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
 ; AVX-LABEL: castA:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
@@ -19,7 +19,7 @@ define <8 x float> @castA(<4 x float> %m
 
 define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
 ; AVX-LABEL: castB:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
@@ -32,14 +32,14 @@ define <4 x double> @castB(<2 x double>
 
 define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: castC:
-; AVX1:       ## BB#0:
+; AVX1:       ## %bb.0:
 ; AVX1-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: castC:
-; AVX2:       ## BB#0:
+; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
@@ -53,7 +53,7 @@ define <4 x i64> @castC(<2 x i64> %m) no
 
 define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
 ; AVX-LABEL: castD:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
@@ -63,7 +63,7 @@ define <4 x float> @castD(<8 x float> %m
 
 define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
 ; AVX-LABEL: castE:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
@@ -73,7 +73,7 @@ define <2 x i64> @castE(<4 x i64> %m) no
 
 define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
 ; AVX-LABEL: castF:
-; AVX:       ## BB#0:
+; AVX:       ## %bb.0:
 ; AVX-NEXT:    ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cmp.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind {
 ; CHECK-LABEL: cmp00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpltps %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %bincmp = fcmp olt <8 x float> %a, %b
@@ -13,7 +13,7 @@ define <8 x i32> @cmp00(<8 x float> %a,
 
 define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind {
 ; CHECK-LABEL: cmp01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %bincmp = fcmp olt <4 x double> %a, %b
@@ -25,12 +25,12 @@ declare void @scale() nounwind
 
 define void @render() nounwind {
 ; CHECK-LABEL: render:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB2_6
-; CHECK-NEXT:  # BB#1: # %for.cond5.preheader
+; CHECK-NEXT:  # %bb.1: # %for.cond5.preheader
 ; CHECK-NEXT:    xorl %ebx, %ebx
 ; CHECK-NEXT:    jmp .LBB2_2
 ; CHECK-NEXT:    .p2align 4, 0x90
@@ -41,11 +41,11 @@ define void @render() nounwind {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    jne .LBB2_2
-; CHECK-NEXT:  # BB#3: # %for.cond5
+; CHECK-NEXT:  # %bb.3: # %for.cond5
 ; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    je .LBB2_2
-; CHECK-NEXT:  # BB#4: # %for.body33
+; CHECK-NEXT:  # %bb.4: # %for.body33
 ; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
 ; CHECK-NEXT:    vucomisd {{\.LCPI.*}}, %xmm0
 ; CHECK-NEXT:    jne .LBB2_5
@@ -78,7 +78,7 @@ for.end52:
 
 define <8 x i32> @int256_cmp(<8 x i32> %i, <8 x i32> %j) nounwind {
 ; CHECK-LABEL: int256_cmp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; CHECK-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -92,7 +92,7 @@ define <8 x i32> @int256_cmp(<8 x i32> %
 
 define <4 x i64> @v4i64_cmp(<4 x i64> %i, <4 x i64> %j) nounwind {
 ; CHECK-LABEL: v4i64_cmp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; CHECK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -106,7 +106,7 @@ define <4 x i64> @v4i64_cmp(<4 x i64> %i
 
 define <16 x i16> @v16i16_cmp(<16 x i16> %i, <16 x i16> %j) nounwind {
 ; CHECK-LABEL: v16i16_cmp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; CHECK-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -120,7 +120,7 @@ define <16 x i16> @v16i16_cmp(<16 x i16>
 
 define <32 x i8> @v32i8_cmp(<32 x i8> %i, <32 x i8> %j) nounwind {
 ; CHECK-LABEL: v32i8_cmp:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; CHECK-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -134,7 +134,7 @@ define <32 x i8> @v32i8_cmp(<32 x i8> %i
 
 define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind {
 ; CHECK-LABEL: int256_cmpeq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm3, %xmm2
@@ -148,7 +148,7 @@ define <8 x i32> @int256_cmpeq(<8 x i32>
 
 define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind {
 ; CHECK-LABEL: v4i64_cmpeq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpcmpeqq %xmm2, %xmm3, %xmm2
@@ -162,7 +162,7 @@ define <4 x i64> @v4i64_cmpeq(<4 x i64>
 
 define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind {
 ; CHECK-LABEL: v16i16_cmpeq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpcmpeqw %xmm2, %xmm3, %xmm2
@@ -176,7 +176,7 @@ define <16 x i16> @v16i16_cmpeq(<16 x i1
 
 define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind {
 ; CHECK-LABEL: v32i8_cmpeq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpcmpeqb %xmm2, %xmm3, %xmm2
@@ -192,7 +192,7 @@ define <32 x i8> @v32i8_cmpeq(<32 x i8>
 
 define i32 @scalarcmpA() uwtable ssp {
 ; CHECK-LABEL: scalarcmpA:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vcmpeqsd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovq %xmm0, %rax
@@ -206,7 +206,7 @@ define i32 @scalarcmpA() uwtable ssp {
 
 define i32 @scalarcmpB() uwtable ssp {
 ; CHECK-LABEL: scalarcmpB:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vcmpeqss %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovd %xmm0, %eax

Modified: llvm/trunk/test/CodeGen/X86/avx-cvt-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cvt-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cvt-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cvt-2.ll Mon Dec  4 09:18:51 2017
@@ -9,7 +9,7 @@
 
 define void @fptoui16(%f32vec_t %a, %i16vec_t *%p) {
 ; CHECK-LABEL: fptoui16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
@@ -23,7 +23,7 @@ define void @fptoui16(%f32vec_t %a, %i16
 
 define void @fptosi16(%f32vec_t %a, %i16vec_t *%p) {
 ; CHECK-LABEL: fptosi16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
@@ -37,7 +37,7 @@ define void @fptosi16(%f32vec_t %a, %i16
 
 define void @fptoui8(%f32vec_t %a, %i8vec_t *%p) {
 ; CHECK-LABEL: fptoui8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
@@ -52,7 +52,7 @@ define void @fptoui8(%f32vec_t %a, %i8ve
 
 define void @fptosi8(%f32vec_t %a, %i8vec_t *%p) {
 ; CHECK-LABEL: fptosi8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/avx-cvt-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cvt-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cvt-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cvt-3.ll Mon Dec  4 09:18:51 2017
@@ -6,14 +6,14 @@
 
 define <8 x float> @sitofp_insert_zero_v8i32(<8 x i32> %a0) {
 ; X86-LABEL: sitofp_insert_zero_v8i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
 ; X86-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_insert_zero_v8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
 ; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
@@ -28,14 +28,14 @@ define <8 x float> @sitofp_insert_zero_v
 
 define <8 x float> @sitofp_shuffle_zero_v8i32(<8 x i32> %a0) {
 ; X86-LABEL: sitofp_shuffle_zero_v8i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
 ; X86-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_shuffle_zero_v8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
 ; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
@@ -47,7 +47,7 @@ define <8 x float> @sitofp_shuffle_zero_
 
 define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
 ; X86-LABEL: sitofp_insert_allbits_v8i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
 ; X86-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
@@ -55,7 +55,7 @@ define <8 x float> @sitofp_insert_allbit
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_insert_allbits_v8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
@@ -71,7 +71,7 @@ define <8 x float> @sitofp_insert_allbit
 
 define <8 x float> @sitofp_shuffle_allbits_v8i32(<8 x i32> %a0) {
 ; X86-LABEL: sitofp_shuffle_allbits_v8i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
 ; X86-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
@@ -79,7 +79,7 @@ define <8 x float> @sitofp_shuffle_allbi
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_shuffle_allbits_v8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
@@ -92,7 +92,7 @@ define <8 x float> @sitofp_shuffle_allbi
 
 define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
 ; X86-LABEL: sitofp_insert_constants_v8i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
 ; X86-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
@@ -107,7 +107,7 @@ define <8 x float> @sitofp_insert_consta
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_insert_constants_v8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
 ; X64-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
@@ -130,13 +130,13 @@ define <8 x float> @sitofp_insert_consta
 
 define <8 x float> @sitofp_shuffle_constants_v8i32(<8 x i32> %a0) {
 ; X86-LABEL: sitofp_shuffle_constants_v8i32:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    vblendps {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7]
 ; X86-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sitofp_shuffle_constants_v8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7]
 ; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cvt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cvt.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x float> @sitofp00(<8 x i32> %a) nounwind {
 ; CHECK-LABEL: sitofp00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %b = sitofp <8 x i32> %a to <8 x float>
@@ -12,7 +12,7 @@ define <8 x float> @sitofp00(<8 x i32> %
 
 define <8 x i32> @fptosi00(<8 x float> %a) nounwind {
 ; CHECK-LABEL: fptosi00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %b = fptosi <8 x float> %a to <8 x i32>
@@ -21,7 +21,7 @@ define <8 x i32> @fptosi00(<8 x float> %
 
 define <4 x double> @sitofp01(<4 x i32> %a) {
 ; CHECK-LABEL: sitofp01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; CHECK-NEXT:    retq
   %b = sitofp <4 x i32> %a to <4 x double>
@@ -30,7 +30,7 @@ define <4 x double> @sitofp01(<4 x i32>
 
 define <8 x float> @sitofp02(<8 x i16> %a) {
 ; CHECK-LABEL: sitofp02:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm1
 ; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm0
@@ -43,7 +43,7 @@ define <8 x float> @sitofp02(<8 x i16> %
 
 define <4 x i32> @fptosi01(<4 x double> %a) {
 ; CHECK-LABEL: fptosi01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttpd2dq %ymm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -53,7 +53,7 @@ define <4 x i32> @fptosi01(<4 x double>
 
 define <8 x float> @fptrunc00(<8 x double> %b) nounwind {
 ; CHECK-LABEL: fptrunc00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtpd2ps %ymm0, %xmm0
 ; CHECK-NEXT:    vcvtpd2ps %ymm1, %xmm1
 ; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -64,7 +64,7 @@ define <8 x float> @fptrunc00(<8 x doubl
 
 define <4 x float> @fptrunc01(<2 x double> %a0, <4 x float> %a1) nounwind {
 ; CHECK-LABEL: fptrunc01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsd2ss %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %ext = extractelement <2 x double> %a0, i32 0
@@ -75,7 +75,7 @@ define <4 x float> @fptrunc01(<2 x doubl
 
 define <4 x double> @fpext00(<4 x float> %b) nounwind {
 ; CHECK-LABEL: fpext00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtps2pd %xmm0, %ymm0
 ; CHECK-NEXT:    retq
   %a = fpext <4 x float> %b to <4 x double>
@@ -84,7 +84,7 @@ define <4 x double> @fpext00(<4 x float>
 
 define <2 x double> @fpext01(<2 x double> %a0, <4 x float> %a1) nounwind {
 ; CHECK-LABEL: fpext01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %ext = extractelement <4 x float> %a1, i32 0
@@ -95,7 +95,7 @@ define <2 x double> @fpext01(<2 x double
 
 define double @funcA(i64* nocapture %e) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: funcA:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsi2sdq (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %tmp1 = load i64, i64* %e, align 8
@@ -105,7 +105,7 @@ define double @funcA(i64* nocapture %e)
 
 define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: funcB:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsi2sdl (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %tmp1 = load i32, i32* %e, align 4
@@ -115,7 +115,7 @@ define double @funcB(i32* nocapture %e)
 
 define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: funcC:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsi2ssl (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %tmp1 = load i32, i32* %e, align 4
@@ -125,7 +125,7 @@ define float @funcC(i32* nocapture %e) n
 
 define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: funcD:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsi2ssq (%rdi), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %tmp1 = load i64, i64* %e, align 8
@@ -135,7 +135,7 @@ define float @funcD(i64* nocapture %e) n
 
 define void @fpext() nounwind uwtable {
 ; CHECK-LABEL: fpext:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovsd %xmm0, -{{[0-9]+}}(%rsp)
@@ -150,7 +150,7 @@ define void @fpext() nounwind uwtable {
 
 define double @nearbyint_f64(double %a) {
 ; CHECK-LABEL: nearbyint_f64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundsd $12, %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call double @llvm.nearbyint.f64(double %a)
@@ -160,7 +160,7 @@ declare double @llvm.nearbyint.f64(doubl
 
 define float @floor_f32(float %a) {
 ; CHECK-LABEL: floor_f32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundss $9, %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call float @llvm.floor.f32(float %a)

Modified: llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-gfni-intrinsics.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
 define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: test_vgf2p8affineinvqb_128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgf2p8affineinvqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xcf,0xc1,0x0b]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -14,7 +14,7 @@ define <16 x i8> @test_vgf2p8affineinvqb
 declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
 define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2) {
 ; CHECK-LABEL: test_vgf2p8affineinvqb_256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgf2p8affineinvqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xcf,0xc1,0x0b]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 11)
@@ -24,7 +24,7 @@ define <32 x i8> @test_vgf2p8affineinvqb
 declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
 define <16 x i8> @test_vgf2p8affineqb(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: test_vgf2p8affineqb:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgf2p8affineqb $11, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0xf9,0xce,0xc1,0x0b]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %src2, i8 11)
@@ -34,7 +34,7 @@ define <16 x i8> @test_vgf2p8affineqb(<1
 declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
 define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2) {
 ; CHECK-LABEL: test_vgf2p8affineqb_256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgf2p8affineqb $11, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0xfd,0xce,0xc1,0x0b]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src1, <32 x i8> %src2, i8 11)
@@ -44,7 +44,7 @@ define <32 x i8> @test_vgf2p8affineqb_25
 declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
 define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) {
 ; CHECK-LABEL: test_vgf2p8mulb_128:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgf2p8mulb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xcf,0xc1]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> %src1, <16 x i8> %src2)
@@ -54,7 +54,7 @@ define <16 x i8> @test_vgf2p8mulb_128(<1
 declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
 define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) {
 ; CHECK-LABEL: test_vgf2p8mulb_256:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vgf2p8mulb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0xcf,0xc1]
 ; CHECK-NEXT:    retl ## encoding: [0xc3]
   %1 = call <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8> %src1, <32 x i8> %src2)

Modified: llvm/trunk/test/CodeGen/X86/avx-insertelt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-insertelt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-insertelt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-insertelt.ll Mon Dec  4 09:18:51 2017
@@ -19,13 +19,13 @@ define <4 x double> @insert_f64(<4 x dou
 
 define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
 ; AVX-LABEL: insert_i8:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_i8:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT:    retq
@@ -35,13 +35,13 @@ define <32 x i8> @insert_i8(<32 x i8> %y
 
 define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
 ; AVX-LABEL: insert_i16:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_i16:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT:    retq
@@ -51,13 +51,13 @@ define <16 x i16> @insert_i16(<16 x i16>
 
 define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
 ; AVX-LABEL: insert_i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrd $0, %edi, %xmm0, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_i32:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %edi, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
 ; AVX2-NEXT:    retq
@@ -67,13 +67,13 @@ define <8 x i32> @insert_i32(<8 x i32> %
 
 define <4 x i64> @insert_i64(<4 x i64> %y, i64 %f, <4 x i64> %x) {
 ; AVX-LABEL: insert_i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_i64:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll Mon Dec  4 09:18:51 2017
@@ -6,12 +6,12 @@
 
 define <4 x double> @test_mm256_add_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_add_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_add_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fadd <4 x double> %a0, %a1
@@ -20,12 +20,12 @@ define <4 x double> @test_mm256_add_pd(<
 
 define <8 x float> @test_mm256_add_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_add_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_add_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fadd <8 x float> %a0, %a1
@@ -34,12 +34,12 @@ define <8 x float> @test_mm256_add_ps(<8
 
 define <4 x double> @test_mm256_addsub_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_addsub_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_addsub_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -49,12 +49,12 @@ declare <4 x double> @llvm.x86.avx.addsu
 
 define <8 x float> @test_mm256_addsub_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_addsub_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_addsub_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -64,12 +64,12 @@ declare <8 x float> @llvm.x86.avx.addsub
 
 define <4 x double> @test_mm256_and_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_and_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_and_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <4 x double> %a0 to <4 x i64>
@@ -81,12 +81,12 @@ define <4 x double> @test_mm256_and_pd(<
 
 define <8 x float> @test_mm256_and_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_and_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_and_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -98,7 +98,7 @@ define <8 x float> @test_mm256_and_ps(<8
 
 define <4 x double> @test_mm256_andnot_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_andnot_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; X32-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
 ; X32-NEXT:    vxorps %ymm2, %ymm0, %ymm0
@@ -106,7 +106,7 @@ define <4 x double> @test_mm256_andnot_p
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_andnot_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; X64-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
 ; X64-NEXT:    vxorps %ymm2, %ymm0, %ymm0
@@ -122,12 +122,12 @@ define <4 x double> @test_mm256_andnot_p
 
 define <8 x float> @test_mm256_andnot_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_andnot_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vandnps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_andnot_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vandnps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -140,12 +140,12 @@ define <8 x float> @test_mm256_andnot_ps
 
 define <4 x double> @test_mm256_blend_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_blend_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_blend_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -154,12 +154,12 @@ define <4 x double> @test_mm256_blend_pd
 
 define <8 x float> @test_mm256_blend_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_blend_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6],ymm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_blend_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6],ymm1[7]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
@@ -168,12 +168,12 @@ define <8 x float> @test_mm256_blend_ps(
 
 define <4 x double> @test_mm256_blendv_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) nounwind {
 ; X32-LABEL: test_mm256_blendv_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_blendv_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
@@ -183,12 +183,12 @@ declare <4 x double> @llvm.x86.avx.blend
 
 define <8 x float> @test_mm256_blendv_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) nounwind {
 ; X32-LABEL: test_mm256_blendv_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_blendv_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
@@ -198,13 +198,13 @@ declare <8 x float> @llvm.x86.avx.blendv
 
 define <4 x double> @test_mm256_broadcast_pd(<2 x double>* %a0) nounwind {
 ; X32-LABEL: test_mm256_broadcast_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_broadcast_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
   %ld = load <2 x double>, <2 x double>* %a0
@@ -214,13 +214,13 @@ define <4 x double> @test_mm256_broadcas
 
 define <8 x float> @test_mm256_broadcast_ps(<4 x float>* %a0) nounwind {
 ; X32-LABEL: test_mm256_broadcast_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_broadcast_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
   %ld = load <4 x float>, <4 x float>* %a0
@@ -230,13 +230,13 @@ define <8 x float> @test_mm256_broadcast
 
 define <4 x double> @test_mm256_broadcast_sd(double* %a0) nounwind {
 ; X32-LABEL: test_mm256_broadcast_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_broadcast_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
   %ld = load double, double* %a0
@@ -249,13 +249,13 @@ define <4 x double> @test_mm256_broadcas
 
 define <4 x float> @test_mm_broadcast_ss(float* %a0) nounwind {
 ; X32-LABEL: test_mm_broadcast_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_broadcast_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    retq
   %ld = load float, float* %a0
@@ -268,13 +268,13 @@ define <4 x float> @test_mm_broadcast_ss
 
 define <8 x float> @test_mm256_broadcast_ss(float* %a0) nounwind {
 ; X32-LABEL: test_mm256_broadcast_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_broadcast_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
   %ld = load float, float* %a0
@@ -291,11 +291,11 @@ define <8 x float> @test_mm256_broadcast
 
 define <8 x float> @test_mm256_castpd_ps(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_castpd_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castpd_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <4 x double> %a0 to <8 x float>
   ret <8 x float> %res
@@ -303,11 +303,11 @@ define <8 x float> @test_mm256_castpd_ps
 
 define <4 x i64> @test_mm256_castpd_si256(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_castpd_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castpd_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <4 x double> %a0 to <4 x i64>
   ret <4 x i64> %res
@@ -315,12 +315,12 @@ define <4 x i64> @test_mm256_castpd_si25
 
 define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_castpd128_pd256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castpd128_pd256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -329,13 +329,13 @@ define <4 x double> @test_mm256_castpd12
 
 define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_castpd256_pd128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castpd256_pd128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -345,11 +345,11 @@ define <2 x double> @test_mm256_castpd25
 
 define <4 x double> @test_mm256_castps_pd(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_castps_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castps_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <8 x float> %a0 to <4 x double>
   ret <4 x double> %res
@@ -357,11 +357,11 @@ define <4 x double> @test_mm256_castps_p
 
 define <4 x i64> @test_mm256_castps_si256(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_castps_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castps_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <8 x float> %a0 to <4 x i64>
   ret <4 x i64> %res
@@ -369,12 +369,12 @@ define <4 x i64> @test_mm256_castps_si25
 
 define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_castps128_ps256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castps128_ps256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -383,13 +383,13 @@ define <8 x float> @test_mm256_castps128
 
 define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_castps256_ps128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castps256_ps128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -399,12 +399,12 @@ define <4 x float> @test_mm256_castps256
 
 define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_castsi128_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castsi128_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -413,11 +413,11 @@ define <4 x i64> @test_mm256_castsi128_s
 
 define <4 x double> @test_mm256_castsi256_pd(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_castsi256_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castsi256_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <4 x i64> %a0 to <4 x double>
   ret <4 x double> %res
@@ -425,11 +425,11 @@ define <4 x double> @test_mm256_castsi25
 
 define <8 x float> @test_mm256_castsi256_ps(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_castsi256_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castsi256_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   %res = bitcast <4 x i64> %a0 to <8 x float>
   ret <8 x float> %res
@@ -437,13 +437,13 @@ define <8 x float> @test_mm256_castsi256
 
 define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_castsi256_si128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_castsi256_si128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -453,12 +453,12 @@ define <2 x i64> @test_mm256_castsi256_s
 
 define <4 x double> @test_mm256_ceil_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_ceil_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vroundpd $2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_ceil_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vroundpd $2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 2)
@@ -468,12 +468,12 @@ declare <4 x double> @llvm.x86.avx.round
 
 define <8 x float> @test_mm256_ceil_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_ceil_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vroundps $2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_ceil_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vroundps $2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 2)
@@ -483,12 +483,12 @@ declare <8 x float> @llvm.x86.avx.round.
 
 define <2 x double> @test_mm_cmp_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmp_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcmpgepd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmp_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpgepd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 13)
@@ -498,12 +498,12 @@ declare <2 x double> @llvm.x86.sse2.cmp.
 
 define <4 x double> @test_mm256_cmp_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_cmp_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcmpgepd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cmp_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpgepd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 13)
@@ -513,12 +513,12 @@ declare <4 x double> @llvm.x86.avx.cmp.p
 
 define <4 x float> @test_mm_cmp_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmp_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcmpgeps %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmp_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpgeps %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 13)
@@ -528,12 +528,12 @@ declare <4 x float> @llvm.x86.sse.cmp.ps
 
 define <8 x float> @test_mm256_cmp_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_cmp_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcmpgeps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cmp_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpgeps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 13)
@@ -543,12 +543,12 @@ declare <8 x float> @llvm.x86.avx.cmp.ps
 
 define <2 x double> @test_mm_cmp_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmp_sd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcmpgesd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmp_sd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpgesd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 13)
@@ -558,12 +558,12 @@ declare <2 x double> @llvm.x86.sse2.cmp.
 
 define <4 x float> @test_mm_cmp_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmp_ss:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcmpgess %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_cmp_ss:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcmpgess %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 13)
@@ -573,12 +573,12 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
 
 define <4 x double> @test_mm256_cvtepi32_pd(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtepi32_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtepi32_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -588,12 +588,12 @@ define <4 x double> @test_mm256_cvtepi32
 
 define <8 x float> @test_mm256_cvtepi32_ps(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtepi32_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtepi32_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -604,13 +604,13 @@ declare <8 x float> @llvm.x86.avx.cvtdq2
 
 define <2 x i64> @test_mm256_cvtpd_epi32(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtpd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtpd2dq %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtpd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtpd2dq %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -622,13 +622,13 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2d
 
 define <4 x float> @test_mm256_cvtpd_ps(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtpd_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtpd2ps %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtpd_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtpd2ps %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -639,12 +639,12 @@ declare <4 x float> @llvm.x86.avx.cvt.pd
 
 define <4 x i64> @test_mm256_cvtps_epi32(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtps_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtps2dq %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtps_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtps2dq %ymm0, %ymm0
 ; X64-NEXT:    retq
   %cvt = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0)
@@ -655,12 +655,12 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2d
 
 define <4 x double> @test_mm256_cvtps_pd(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvtps_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvtps2pd %xmm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvtps_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvtps2pd %xmm0, %ymm0
 ; X64-NEXT:    retq
   %res = fpext <4 x float> %a0 to <4 x double>
@@ -669,13 +669,13 @@ define <4 x double> @test_mm256_cvtps_pd
 
 define <2 x i64> @test_mm256_cvttpd_epi32(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvttpd_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvttpd2dq %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvttpd_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvttpd2dq %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -687,12 +687,12 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2
 
 define <4 x i64> @test_mm256_cvttps_epi32(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_cvttps_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_cvttps_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; X64-NEXT:    retq
   %cvt = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0)
@@ -703,12 +703,12 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2
 
 define <4 x double> @test_mm256_div_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_div_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vdivpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_div_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vdivpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fdiv <4 x double> %a0, %a1
@@ -717,12 +717,12 @@ define <4 x double> @test_mm256_div_pd(<
 
 define <8 x float> @test_mm256_div_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_div_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vdivps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_div_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vdivps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fdiv <8 x float> %a0, %a1
@@ -731,12 +731,12 @@ define <8 x float> @test_mm256_div_ps(<8
 
 define <8 x float> @test_mm256_dp_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_dp_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_dp_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
@@ -746,7 +746,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 
 define i32 @test_mm256_extract_epi8(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_extract_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vpextrb $15, %xmm0, %eax
 ; X32-NEXT:    movzbl %al, %eax
@@ -754,7 +754,7 @@ define i32 @test_mm256_extract_epi8(<4 x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extract_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vpextrb $15, %xmm0, %eax
 ; X64-NEXT:    movzbl %al, %eax
@@ -768,7 +768,7 @@ define i32 @test_mm256_extract_epi8(<4 x
 
 define i32 @test_mm256_extract_epi16(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_extract_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vpextrw $3, %xmm0, %eax
 ; X32-NEXT:    movzwl %ax, %eax
@@ -776,7 +776,7 @@ define i32 @test_mm256_extract_epi16(<4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extract_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vpextrw $3, %xmm0, %eax
 ; X64-NEXT:    movzwl %ax, %eax
@@ -790,14 +790,14 @@ define i32 @test_mm256_extract_epi16(<4
 
 define i32 @test_mm256_extract_epi32(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_extract_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vextractps $1, %xmm0, %eax
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extract_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vextractps $1, %xmm0, %eax
 ; X64-NEXT:    vzeroupper
@@ -809,7 +809,7 @@ define i32 @test_mm256_extract_epi32(<4
 
 define i64 @test_mm256_extract_epi64(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_extract_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vextractps $2, %xmm0, %eax
 ; X32-NEXT:    vextractps $3, %xmm0, %edx
@@ -817,7 +817,7 @@ define i64 @test_mm256_extract_epi64(<4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extract_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vpextrq $1, %xmm0, %rax
 ; X64-NEXT:    vzeroupper
@@ -828,13 +828,13 @@ define i64 @test_mm256_extract_epi64(<4
 
 define <2 x double> @test_mm256_extractf128_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_extractf128_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extractf128_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -844,13 +844,13 @@ define <2 x double> @test_mm256_extractf
 
 define <4 x float> @test_mm256_extractf128_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_extractf128_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extractf128_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -860,13 +860,13 @@ define <4 x float> @test_mm256_extractf1
 
 define <2 x i64> @test_mm256_extractf128_si256(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_extractf128_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_extractf128_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -876,12 +876,12 @@ define <2 x i64> @test_mm256_extractf128
 
 define <4 x double> @test_mm256_floor_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_floor_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vroundpd $1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_floor_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vroundpd $1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 1)
@@ -890,12 +890,12 @@ define <4 x double> @test_mm256_floor_pd
 
 define <8 x float> @test_mm256_floor_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_floor_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vroundps $1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_floor_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vroundps $1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 1)
@@ -904,12 +904,12 @@ define <8 x float> @test_mm256_floor_ps(
 
 define <4 x double> @test_mm256_hadd_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_hadd_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_hadd_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -919,12 +919,12 @@ declare <4 x double> @llvm.x86.avx.hadd.
 
 define <8 x float> @test_mm256_hadd_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_hadd_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_hadd_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -934,12 +934,12 @@ declare <8 x float> @llvm.x86.avx.hadd.p
 
 define <4 x double> @test_mm256_hsub_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_hsub_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_hsub_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -949,12 +949,12 @@ declare <4 x double> @llvm.x86.avx.hsub.
 
 define <8 x float> @test_mm256_hsub_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_hsub_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_hsub_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -964,14 +964,14 @@ declare <8 x float> @llvm.x86.avx.hsub.p
 
 define <4 x i64> @test_mm256_insert_epi8(<4 x i64> %a0, i8 %a1) nounwind {
 ; X32-LABEL: test_mm256_insert_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm1
 ; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insert_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -984,7 +984,7 @@ define <4 x i64> @test_mm256_insert_epi8
 
 define <4 x i64> @test_mm256_insert_epi16(<4 x i64> %a0, i16 %a1) nounwind {
 ; X32-LABEL: test_mm256_insert_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
@@ -992,7 +992,7 @@ define <4 x i64> @test_mm256_insert_epi1
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insert_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-NEXT:    vpinsrw $6, %edi, %xmm1, %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1005,13 +1005,13 @@ define <4 x i64> @test_mm256_insert_epi1
 
 define <4 x i64> @test_mm256_insert_epi32(<4 x i64> %a0, i32 %a1) nounwind {
 ; X32-LABEL: test_mm256_insert_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm1
 ; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insert_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm1
 ; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; X64-NEXT:    retq
@@ -1023,7 +1023,7 @@ define <4 x i64> @test_mm256_insert_epi3
 
 define <4 x i64> @test_mm256_insert_epi64(<4 x i64> %a0, i64 %a1) nounwind {
 ; X32-LABEL: test_mm256_insert_epi64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
 ; X32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
@@ -1031,7 +1031,7 @@ define <4 x i64> @test_mm256_insert_epi6
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insert_epi64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-NEXT:    vpinsrq $1, %rdi, %xmm1, %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1042,13 +1042,13 @@ define <4 x i64> @test_mm256_insert_epi6
 
 define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_insertf128_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insertf128_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; X64-NEXT:    retq
@@ -1059,12 +1059,12 @@ define <4 x double> @test_mm256_insertf1
 
 define <8 x float> @test_mm256_insertf128_ps(<8 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_insertf128_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insertf128_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %ext = shufflevector <4 x float> %a1, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1074,13 +1074,13 @@ define <8 x float> @test_mm256_insertf12
 
 define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_insertf128_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_insertf128_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; X64-NEXT:    retq
@@ -1091,13 +1091,13 @@ define <4 x i64> @test_mm256_insertf128_
 
 define <4 x i64> @test_mm256_lddqu_si256(<4 x i64>* %a0) nounwind {
 ; X32-LABEL: test_mm256_lddqu_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vlddqu (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_lddqu_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vlddqu (%rdi), %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast <4 x i64>* %a0 to i8*
@@ -1109,13 +1109,13 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.2
 
 define <4 x double> @test_mm256_load_pd(double* %a0) nounwind {
 ; X32-LABEL: test_mm256_load_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_load_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <4 x double>*
@@ -1125,13 +1125,13 @@ define <4 x double> @test_mm256_load_pd(
 
 define <8 x float> @test_mm256_load_ps(float* %a0) nounwind {
 ; X32-LABEL: test_mm256_load_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_load_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <8 x float>*
@@ -1141,13 +1141,13 @@ define <8 x float> @test_mm256_load_ps(f
 
 define <4 x i64> @test_mm256_load_si256(<4 x i64>* %a0) nounwind {
 ; X32-LABEL: test_mm256_load_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_load_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %ymm0
 ; X64-NEXT:    retq
   %res = load <4 x i64>, <4 x i64>* %a0, align 32
@@ -1156,13 +1156,13 @@ define <4 x i64> @test_mm256_load_si256(
 
 define <4 x double> @test_mm256_loadu_pd(double* %a0) nounwind {
 ; X32-LABEL: test_mm256_loadu_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovups (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_loadu_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups (%rdi), %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to <4 x double>*
@@ -1172,13 +1172,13 @@ define <4 x double> @test_mm256_loadu_pd
 
 define <8 x float> @test_mm256_loadu_ps(float* %a0) nounwind {
 ; X32-LABEL: test_mm256_loadu_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovups (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_loadu_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups (%rdi), %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to <8 x float>*
@@ -1188,13 +1188,13 @@ define <8 x float> @test_mm256_loadu_ps(
 
 define <4 x i64> @test_mm256_loadu_si256(<4 x i64>* %a0) nounwind {
 ; X32-LABEL: test_mm256_loadu_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovups (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_loadu_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups (%rdi), %ymm0
 ; X64-NEXT:    retq
   %res = load <4 x i64>, <4 x i64>* %a0, align 1
@@ -1203,7 +1203,7 @@ define <4 x i64> @test_mm256_loadu_si256
 
 define <8 x float> @test_mm256_loadu2_m128(float* %a0, float* %a1) nounwind {
 ; X32-LABEL: test_mm256_loadu2_m128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovups (%eax), %xmm0
@@ -1211,7 +1211,7 @@ define <8 x float> @test_mm256_loadu2_m1
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_loadu2_m128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups (%rsi), %xmm0
 ; X64-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -1227,7 +1227,7 @@ define <8 x float> @test_mm256_loadu2_m1
 
 define <4 x double> @test_mm256_loadu2_m128d(double* %a0, double* %a1) nounwind {
 ; X32-LABEL: test_mm256_loadu2_m128d:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovups (%eax), %xmm0
@@ -1235,7 +1235,7 @@ define <4 x double> @test_mm256_loadu2_m
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_loadu2_m128d:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups (%rsi), %xmm0
 ; X64-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -1251,7 +1251,7 @@ define <4 x double> @test_mm256_loadu2_m
 
 define <4 x i64> @test_mm256_loadu2_m128i(i64* %a0, i64* %a1) nounwind {
 ; X32-LABEL: test_mm256_loadu2_m128i:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovups (%eax), %xmm0
@@ -1259,7 +1259,7 @@ define <4 x i64> @test_mm256_loadu2_m128
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_loadu2_m128i:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups (%rsi), %xmm0
 ; X64-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -1275,13 +1275,13 @@ define <4 x i64> @test_mm256_loadu2_m128
 
 define <2 x double> @test_mm_maskload_pd(double* %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_maskload_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovpd (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskload_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to i8*
@@ -1292,13 +1292,13 @@ declare <2 x double> @llvm.x86.avx.maskl
 
 define <4 x double> @test_mm256_maskload_pd(double* %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_maskload_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovpd (%eax), %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskload_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to i8*
@@ -1309,13 +1309,13 @@ declare <4 x double> @llvm.x86.avx.maskl
 
 define <4 x float> @test_mm_maskload_ps(float* %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_maskload_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovps (%eax), %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskload_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to i8*
@@ -1327,13 +1327,13 @@ declare <4 x float> @llvm.x86.avx.masklo
 
 define <8 x float> @test_mm256_maskload_ps(float* %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_maskload_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovps (%eax), %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskload_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm0
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to i8*
@@ -1345,13 +1345,13 @@ declare <8 x float> @llvm.x86.avx.masklo
 
 define void @test_mm_maskstore_pd(double* %a0, <2 x i64> %a1, <2 x double> %a2) nounwind {
 ; X32-LABEL: test_mm_maskstore_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovpd %xmm1, %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskstore_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast double* %a0 to i8*
@@ -1362,14 +1362,14 @@ declare void @llvm.x86.avx.maskstore.pd(
 
 define void @test_mm256_maskstore_pd(double* %a0, <4 x i64> %a1, <4 x double> %a2) nounwind {
 ; X32-LABEL: test_mm256_maskstore_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovpd %ymm1, %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskstore_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -1381,13 +1381,13 @@ declare void @llvm.x86.avx.maskstore.pd.
 
 define void @test_mm_maskstore_ps(float* %a0, <2 x i64> %a1, <4 x float> %a2) nounwind {
 ; X32-LABEL: test_mm_maskstore_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovps %xmm1, %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskstore_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %arg0 = bitcast float* %a0 to i8*
@@ -1399,14 +1399,14 @@ declare void @llvm.x86.avx.maskstore.ps(
 
 define void @test_mm256_maskstore_ps(float* %a0, <4 x i64> %a1, <8 x float> %a2) nounwind {
 ; X32-LABEL: test_mm256_maskstore_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmaskmovps %ymm1, %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskstore_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -1419,12 +1419,12 @@ declare void @llvm.x86.avx.maskstore.ps.
 
 define <4 x double> @test_mm256_max_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_max_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_max_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -1434,12 +1434,12 @@ declare <4 x double> @llvm.x86.avx.max.p
 
 define <8 x float> @test_mm256_max_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_max_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_max_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -1449,12 +1449,12 @@ declare <8 x float> @llvm.x86.avx.max.ps
 
 define <4 x double> @test_mm256_min_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_min_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vminpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_min_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vminpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -1464,12 +1464,12 @@ declare <4 x double> @llvm.x86.avx.min.p
 
 define <8 x float> @test_mm256_min_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_min_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vminps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_min_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vminps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -1479,12 +1479,12 @@ declare <8 x float> @llvm.x86.avx.min.ps
 
 define <4 x double> @test_mm256_movedup_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_movedup_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_movedup_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -1493,12 +1493,12 @@ define <4 x double> @test_mm256_movedup_
 
 define <8 x float> @test_mm256_movehdup_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_movehdup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_movehdup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -1507,12 +1507,12 @@ define <8 x float> @test_mm256_movehdup_
 
 define <8 x float> @test_mm256_moveldup_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_moveldup_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_moveldup_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -1521,13 +1521,13 @@ define <8 x float> @test_mm256_moveldup_
 
 define i32 @test_mm256_movemask_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_movemask_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovmskpd %ymm0, %eax
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_movemask_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovmskpd %ymm0, %eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -1538,13 +1538,13 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(
 
 define i32 @test_mm256_movemask_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_movemask_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovmskps %ymm0, %eax
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_movemask_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovmskps %ymm0, %eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -1555,12 +1555,12 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(
 
 define <4 x double> @test_mm256_mul_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_mul_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mul_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fmul <4 x double> %a0, %a1
@@ -1569,12 +1569,12 @@ define <4 x double> @test_mm256_mul_pd(<
 
 define <8 x float> @test_mm256_mul_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_mul_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mul_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fmul <8 x float> %a0, %a1
@@ -1583,12 +1583,12 @@ define <8 x float> @test_mm256_mul_ps(<8
 
 define <4 x double> @test_mm256_or_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_or_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_or_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <4 x double> %a0 to <4 x i64>
@@ -1600,12 +1600,12 @@ define <4 x double> @test_mm256_or_pd(<4
 
 define <8 x float> @test_mm256_or_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_or_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_or_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -1617,12 +1617,12 @@ define <8 x float> @test_mm256_or_ps(<8
 
 define <2 x double> @test_mm_permute_pd(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm_permute_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_permute_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> %a0, <2 x i32> <i32 1, i32 0>
@@ -1631,12 +1631,12 @@ define <2 x double> @test_mm_permute_pd(
 
 define <4 x double> @test_mm256_permute_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_permute_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permute_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> %a0, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -1645,12 +1645,12 @@ define <4 x double> @test_mm256_permute_
 
 define <4 x float> @test_mm_permute_ps(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm_permute_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_permute_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -1659,12 +1659,12 @@ define <4 x float> @test_mm_permute_ps(<
 
 define <4 x float> @test2_mm_permute_ps(<4 x float> %a0) nounwind {
 ; X32-LABEL: test2_mm_permute_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test2_mm_permute_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,2,3]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 2, i32 1, i32 2, i32 3>
@@ -1673,12 +1673,12 @@ define <4 x float> @test2_mm_permute_ps(
 
 define <8 x float> @test_mm256_permute_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_permute_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permute_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -1687,12 +1687,12 @@ define <8 x float> @test_mm256_permute_p
 
 define <4 x double> @test_mm256_permute2f128_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_permute2f128_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm1[0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permute2f128_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm1[0,1]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> zeroinitializer, <4 x double> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1703,12 +1703,12 @@ declare <4 x double> @llvm.x86.avx.vperm
 ; PR26667
 define <8 x float> @test_mm256_permute2f128_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_permute2f128_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permute2f128_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a1, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
@@ -1718,12 +1718,12 @@ declare <8 x float> @llvm.x86.avx.vperm2
 
 define <4 x i64> @test_mm256_permute2f128_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_permute2f128_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permute2f128_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
 ; X64-NEXT:    retq
   %1 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1736,12 +1736,12 @@ declare <8 x i32> @llvm.x86.avx.vperm2f1
 
 define <2 x double> @test_mm_permutevar_pd(<2 x double> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_permutevar_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_permutevar_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
@@ -1751,12 +1751,12 @@ declare <2 x double> @llvm.x86.avx.vperm
 
 define <4 x double> @test_mm256_permutevar_pd(<4 x double> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_permutevar_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permutevar_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
@@ -1766,12 +1766,12 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 define <4 x float> @test_mm_permutevar_ps(<4 x float> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm_permutevar_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_permutevar_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
@@ -1782,12 +1782,12 @@ declare <4 x float> @llvm.x86.avx.vpermi
 
 define <8 x float> @test_mm256_permutevar_ps(<8 x float> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_permutevar_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vpermilps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_permutevar_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
@@ -1798,12 +1798,12 @@ declare <8 x float> @llvm.x86.avx.vpermi
 
 define <8 x float> @test_mm256_rcp_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_rcp_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vrcpps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_rcp_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vrcpps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
@@ -1813,12 +1813,12 @@ declare <8 x float> @llvm.x86.avx.rcp.ps
 
 define <4 x double> @test_mm256_round_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_round_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vroundpd $4, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_round_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vroundpd $4, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 4)
@@ -1827,12 +1827,12 @@ define <4 x double> @test_mm256_round_pd
 
 define <8 x float> @test_mm256_round_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_round_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vroundps $4, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_round_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vroundps $4, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 4)
@@ -1841,12 +1841,12 @@ define <8 x float> @test_mm256_round_ps(
 
 define <8 x float> @test_mm256_rsqrt_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_rsqrt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vrsqrtps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_rsqrt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vrsqrtps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
@@ -1856,7 +1856,7 @@ declare <8 x float> @llvm.x86.avx.rsqrt.
 
 define <4 x i64> @test_mm256_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) nounwind {
 ; X32-LABEL: test_mm256_set_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovd %ecx, %xmm0
@@ -1925,7 +1925,7 @@ define <4 x i64> @test_mm256_set_epi8(i8
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
 ; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
@@ -2030,7 +2030,7 @@ define <4 x i64> @test_mm256_set_epi8(i8
 
 define <4 x i64> @test_mm256_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) nounwind {
 ; X32-LABEL: test_mm256_set_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovd %eax, %xmm0
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
@@ -2067,7 +2067,7 @@ define <4 x i64> @test_mm256_set_epi16(i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
@@ -2118,7 +2118,7 @@ define <4 x i64> @test_mm256_set_epi16(i
 
 define <4 x i64> @test_mm256_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind {
 ; X32-LABEL: test_mm256_set_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2131,7 +2131,7 @@ define <4 x i64> @test_mm256_set_epi32(i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovd %ecx, %xmm0
 ; X64-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
 ; X64-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0
@@ -2156,7 +2156,7 @@ define <4 x i64> @test_mm256_set_epi32(i
 
 define <4 x i64> @test_mm256_set_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nounwind {
 ; X32-LABEL: test_mm256_set_epi64x:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2169,7 +2169,7 @@ define <4 x i64> @test_mm256_set_epi64x(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_epi64x:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %rdi, %xmm0
 ; X64-NEXT:    vmovq %rsi, %xmm1
 ; X64-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -2187,13 +2187,13 @@ define <4 x i64> @test_mm256_set_epi64x(
 
 define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_set_m128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_m128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X64-NEXT:    retq
@@ -2203,13 +2203,13 @@ define <8 x float> @test_mm256_set_m128(
 
 define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_set_m128d:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_m128d:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X64-NEXT:    retq
@@ -2222,13 +2222,13 @@ define <4 x double> @test_mm256_set_m128
 
 define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_set_m128i:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_m128i:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X64-NEXT:    retq
@@ -2241,7 +2241,7 @@ define <4 x i64> @test_mm256_set_m128i(<
 
 define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, double %a3) nounwind {
 ; X32-LABEL: test_mm256_set_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
@@ -2252,7 +2252,7 @@ define <4 x double> @test_mm256_set_pd(d
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -2266,7 +2266,7 @@ define <4 x double> @test_mm256_set_pd(d
 
 define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind {
 ; X32-LABEL: test_mm256_set_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -2285,7 +2285,7 @@ define <8 x float> @test_mm256_set_ps(fl
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; X64-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
 ; X64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
@@ -2307,7 +2307,7 @@ define <8 x float> @test_mm256_set_ps(fl
 
 define <4 x i64> @test_mm256_set1_epi8(i8 %a0) nounwind {
 ; X32-LABEL: test_mm256_set1_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovd %eax, %xmm0
 ; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -2316,7 +2316,7 @@ define <4 x i64> @test_mm256_set1_epi8(i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set1_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
 ; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -2361,7 +2361,7 @@ define <4 x i64> @test_mm256_set1_epi8(i
 
 define <4 x i64> @test_mm256_set1_epi16(i16 %a0) nounwind {
 ; X32-LABEL: test_mm256_set1_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovd %eax, %xmm0
 ; X32-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
@@ -2370,7 +2370,7 @@ define <4 x i64> @test_mm256_set1_epi16(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set1_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovd %edi, %xmm0
 ; X64-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2398,14 +2398,14 @@ define <4 x i64> @test_mm256_set1_epi16(
 
 define <4 x i64> @test_mm256_set1_epi32(i32 %a0) nounwind {
 ; X32-LABEL: test_mm256_set1_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set1_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovd %edi, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2424,7 +2424,7 @@ define <4 x i64> @test_mm256_set1_epi32(
 
 define <4 x i64> @test_mm256_set1_epi64x(i64 %a0) nounwind {
 ; X32-LABEL: test_mm256_set1_epi64x:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovd %ecx, %xmm0
@@ -2435,7 +2435,7 @@ define <4 x i64> @test_mm256_set1_epi64x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set1_epi64x:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %rdi, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2449,14 +2449,14 @@ define <4 x i64> @test_mm256_set1_epi64x
 
 define <4 x double> @test_mm256_set1_pd(double %a0) nounwind {
 ; X32-LABEL: test_mm256_set1_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set1_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -2469,14 +2469,14 @@ define <4 x double> @test_mm256_set1_pd(
 
 define <8 x float> @test_mm256_set1_ps(float %a0) nounwind {
 ; X32-LABEL: test_mm256_set1_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_set1_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -2493,7 +2493,7 @@ define <8 x float> @test_mm256_set1_ps(f
 
 define <4 x i64> @test_mm256_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) nounwind {
 ; X32-LABEL: test_mm256_setr_epi8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovd %ecx, %xmm0
@@ -2562,7 +2562,7 @@ define <4 x i64> @test_mm256_setr_epi8(i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_epi8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
 ; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
@@ -2667,7 +2667,7 @@ define <4 x i64> @test_mm256_setr_epi8(i
 
 define <4 x i64> @test_mm256_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) nounwind {
 ; X32-LABEL: test_mm256_setr_epi16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovd %eax, %xmm0
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
@@ -2704,7 +2704,7 @@ define <4 x i64> @test_mm256_setr_epi16(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_epi16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
 ; X64-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
@@ -2755,7 +2755,7 @@ define <4 x i64> @test_mm256_setr_epi16(
 
 define <4 x i64> @test_mm256_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind {
 ; X32-LABEL: test_mm256_setr_epi32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2768,7 +2768,7 @@ define <4 x i64> @test_mm256_setr_epi32(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_epi32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovd %r8d, %xmm0
 ; X64-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
 ; X64-NEXT:    vpinsrd $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
@@ -2793,7 +2793,7 @@ define <4 x i64> @test_mm256_setr_epi32(
 
 define <4 x i64> @test_mm256_setr_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nounwind {
 ; X32-LABEL: test_mm256_setr_epi64x:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
@@ -2806,7 +2806,7 @@ define <4 x i64> @test_mm256_setr_epi64x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_epi64x:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %rcx, %xmm0
 ; X64-NEXT:    vmovq %rdx, %xmm1
 ; X64-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -2824,13 +2824,13 @@ define <4 x i64> @test_mm256_setr_epi64x
 
 define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_setr_m128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_m128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -2840,13 +2840,13 @@ define <8 x float> @test_mm256_setr_m128
 
 define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_setr_m128d:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_m128d:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -2859,13 +2859,13 @@ define <4 x double> @test_mm256_setr_m12
 
 define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_setr_m128i:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_m128i:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
@@ -2878,7 +2878,7 @@ define <4 x i64> @test_mm256_setr_m128i(
 
 define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, double %a3) nounwind {
 ; X32-LABEL: test_mm256_setr_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; X32-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
@@ -2889,7 +2889,7 @@ define <4 x double> @test_mm256_setr_pd(
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; X64-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -2903,7 +2903,7 @@ define <4 x double> @test_mm256_setr_pd(
 
 define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind {
 ; X32-LABEL: test_mm256_setr_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X32-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -2922,7 +2922,7 @@ define <8 x float> @test_mm256_setr_ps(f
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setr_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
 ; X64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
 ; X64-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
@@ -2944,12 +2944,12 @@ define <8 x float> @test_mm256_setr_ps(f
 
 define <4 x double> @test_mm256_setzero_pd() nounwind {
 ; X32-LABEL: test_mm256_setzero_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setzero_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   ret <4 x double> zeroinitializer
@@ -2957,12 +2957,12 @@ define <4 x double> @test_mm256_setzero_
 
 define <8 x float> @test_mm256_setzero_ps() nounwind {
 ; X32-LABEL: test_mm256_setzero_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setzero_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   ret <8 x float> zeroinitializer
@@ -2970,12 +2970,12 @@ define <8 x float> @test_mm256_setzero_p
 
 define <4 x i64> @test_mm256_setzero_si256() nounwind {
 ; X32-LABEL: test_mm256_setzero_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_setzero_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   ret <4 x i64> zeroinitializer
@@ -2983,12 +2983,12 @@ define <4 x i64> @test_mm256_setzero_si2
 
 define <4 x double> @test_mm256_shuffle_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_shuffle_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_shuffle_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -2997,12 +2997,12 @@ define <4 x double> @test_mm256_shuffle_
 
 define <8 x float> @test_mm256_shuffle_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_shuffle_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_shuffle_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
@@ -3011,12 +3011,12 @@ define <8 x float> @test_mm256_shuffle_p
 
 define <4 x double> @test_mm256_sqrt_pd(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_sqrt_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vsqrtpd %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_sqrt_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vsqrtpd %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
@@ -3026,12 +3026,12 @@ declare <4 x double> @llvm.x86.avx.sqrt.
 
 define <8 x float> @test_mm256_sqrt_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_sqrt_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vsqrtps %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_sqrt_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vsqrtps %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
@@ -3041,14 +3041,14 @@ declare <8 x float> @llvm.x86.avx.sqrt.p
 
 define void @test_mm256_store_pd(double* %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_store_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_store_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3059,14 +3059,14 @@ define void @test_mm256_store_pd(double*
 
 define void @test_mm256_store_ps(float* %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_store_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_store_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3077,14 +3077,14 @@ define void @test_mm256_store_ps(float*
 
 define void @test_mm256_store_si256(<4 x i64>* %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_store_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovaps %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_store_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3094,14 +3094,14 @@ define void @test_mm256_store_si256(<4 x
 
 define void @test_mm256_storeu_pd(double* %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_storeu_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovups %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_storeu_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3112,14 +3112,14 @@ define void @test_mm256_storeu_pd(double
 
 define void @test_mm256_storeu_ps(float* %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_storeu_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovups %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_storeu_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3130,14 +3130,14 @@ define void @test_mm256_storeu_ps(float*
 
 define void @test_mm256_storeu_si256(<4 x i64>* %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_storeu_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovups %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_storeu_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3147,7 +3147,7 @@ define void @test_mm256_storeu_si256(<4
 
 define void @test_mm256_storeu2_m128(float* %a0, float* %a1, <8 x float> %a2) nounwind {
 ; X32-LABEL: test_mm256_storeu2_m128:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovups %xmm0, (%ecx)
@@ -3157,7 +3157,7 @@ define void @test_mm256_storeu2_m128(flo
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_storeu2_m128:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %xmm0, (%rdi)
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vmovups %xmm0, (%rsi)
@@ -3174,7 +3174,7 @@ define void @test_mm256_storeu2_m128(flo
 
 define void @test_mm256_storeu2_m128d(double* %a0, double* %a1, <4 x double> %a2) nounwind {
 ; X32-LABEL: test_mm256_storeu2_m128d:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovups %xmm0, (%ecx)
@@ -3184,7 +3184,7 @@ define void @test_mm256_storeu2_m128d(do
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_storeu2_m128d:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %xmm0, (%rdi)
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vmovups %xmm0, (%rsi)
@@ -3201,7 +3201,7 @@ define void @test_mm256_storeu2_m128d(do
 
 define void @test_mm256_storeu2_m128i(<2 x i64>* %a0, <2 x i64>* %a1, <4 x i64> %a2) nounwind {
 ; X32-LABEL: test_mm256_storeu2_m128i:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovups %xmm0, (%ecx)
@@ -3211,7 +3211,7 @@ define void @test_mm256_storeu2_m128i(<2
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_storeu2_m128i:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %xmm0, (%rdi)
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vmovups %xmm0, (%rsi)
@@ -3228,14 +3228,14 @@ define void @test_mm256_storeu2_m128i(<2
 
 define void @test_mm256_stream_pd(double *%a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_stream_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovntps %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_stream_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovntps %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3246,14 +3246,14 @@ define void @test_mm256_stream_pd(double
 
 define void @test_mm256_stream_ps(float *%a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_stream_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovntps %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_stream_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovntps %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3264,14 +3264,14 @@ define void @test_mm256_stream_ps(float
 
 define void @test_mm256_stream_si256(<4 x i64> *%a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_stream_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovntps %ymm0, (%eax)
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_stream_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovntps %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
@@ -3281,12 +3281,12 @@ define void @test_mm256_stream_si256(<4
 
 define <4 x double> @test_mm256_sub_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_sub_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vsubpd %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_sub_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vsubpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fsub <4 x double> %a0, %a1
@@ -3295,12 +3295,12 @@ define <4 x double> @test_mm256_sub_pd(<
 
 define <8 x float> @test_mm256_sub_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_sub_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vsubps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_sub_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vsubps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %res = fsub <8 x float> %a0, %a1
@@ -3309,14 +3309,14 @@ define <8 x float> @test_mm256_sub_ps(<8
 
 define i32 @test_mm_testc_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_testc_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestpd %xmm1, %xmm0
 ; X32-NEXT:    setb %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_testc_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestpd %xmm1, %xmm0
 ; X64-NEXT:    setb %al
@@ -3328,7 +3328,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x
 
 define i32 @test_mm256_testc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_testc_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestpd %ymm1, %ymm0
 ; X32-NEXT:    setb %al
@@ -3336,7 +3336,7 @@ define i32 @test_mm256_testc_pd(<4 x dou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testc_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestpd %ymm1, %ymm0
 ; X64-NEXT:    setb %al
@@ -3349,14 +3349,14 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(
 
 define i32 @test_mm_testc_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_testc_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestps %xmm1, %xmm0
 ; X32-NEXT:    setb %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_testc_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestps %xmm1, %xmm0
 ; X64-NEXT:    setb %al
@@ -3368,7 +3368,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x
 
 define i32 @test_mm256_testc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_testc_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestps %ymm1, %ymm0
 ; X32-NEXT:    setb %al
@@ -3376,7 +3376,7 @@ define i32 @test_mm256_testc_ps(<8 x flo
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testc_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestps %ymm1, %ymm0
 ; X64-NEXT:    setb %al
@@ -3389,7 +3389,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(
 
 define i32 @test_mm256_testc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_testc_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vptest %ymm1, %ymm0
 ; X32-NEXT:    setb %al
@@ -3397,7 +3397,7 @@ define i32 @test_mm256_testc_si256(<4 x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testc_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vptest %ymm1, %ymm0
 ; X64-NEXT:    setb %al
@@ -3410,14 +3410,14 @@ declare i32 @llvm.x86.avx.ptestc.256(<4
 
 define i32 @test_mm_testnzc_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_testnzc_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestpd %xmm1, %xmm0
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_testnzc_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestpd %xmm1, %xmm0
 ; X64-NEXT:    seta %al
@@ -3429,7 +3429,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2
 
 define i32 @test_mm256_testnzc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_testnzc_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestpd %ymm1, %ymm0
 ; X32-NEXT:    seta %al
@@ -3437,7 +3437,7 @@ define i32 @test_mm256_testnzc_pd(<4 x d
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testnzc_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestpd %ymm1, %ymm0
 ; X64-NEXT:    seta %al
@@ -3450,14 +3450,14 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.25
 
 define i32 @test_mm_testnzc_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_testnzc_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestps %xmm1, %xmm0
 ; X32-NEXT:    seta %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_testnzc_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestps %xmm1, %xmm0
 ; X64-NEXT:    seta %al
@@ -3469,7 +3469,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4
 
 define i32 @test_mm256_testnzc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_testnzc_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestps %ymm1, %ymm0
 ; X32-NEXT:    seta %al
@@ -3477,7 +3477,7 @@ define i32 @test_mm256_testnzc_ps(<8 x f
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testnzc_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestps %ymm1, %ymm0
 ; X64-NEXT:    seta %al
@@ -3490,7 +3490,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.25
 
 define i32 @test_mm256_testnzc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_testnzc_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vptest %ymm1, %ymm0
 ; X32-NEXT:    seta %al
@@ -3498,7 +3498,7 @@ define i32 @test_mm256_testnzc_si256(<4
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testnzc_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vptest %ymm1, %ymm0
 ; X64-NEXT:    seta %al
@@ -3511,14 +3511,14 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<
 
 define i32 @test_mm_testz_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_testz_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestpd %xmm1, %xmm0
 ; X32-NEXT:    sete %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_testz_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestpd %xmm1, %xmm0
 ; X64-NEXT:    sete %al
@@ -3530,7 +3530,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x
 
 define i32 @test_mm256_testz_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_testz_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestpd %ymm1, %ymm0
 ; X32-NEXT:    sete %al
@@ -3538,7 +3538,7 @@ define i32 @test_mm256_testz_pd(<4 x dou
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testz_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestpd %ymm1, %ymm0
 ; X64-NEXT:    sete %al
@@ -3551,14 +3551,14 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(
 
 define i32 @test_mm_testz_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_testz_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestps %xmm1, %xmm0
 ; X32-NEXT:    sete %al
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_testz_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestps %xmm1, %xmm0
 ; X64-NEXT:    sete %al
@@ -3570,7 +3570,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x
 
 define i32 @test_mm256_testz_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_testz_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vtestps %ymm1, %ymm0
 ; X32-NEXT:    sete %al
@@ -3578,7 +3578,7 @@ define i32 @test_mm256_testz_ps(<8 x flo
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testz_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vtestps %ymm1, %ymm0
 ; X64-NEXT:    sete %al
@@ -3591,7 +3591,7 @@ declare i32 @llvm.x86.avx.vtestz.ps.256(
 
 define i32 @test_mm256_testz_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
 ; X32-LABEL: test_mm256_testz_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    vptest %ymm1, %ymm0
 ; X32-NEXT:    sete %al
@@ -3599,7 +3599,7 @@ define i32 @test_mm256_testz_si256(<4 x
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_testz_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    vptest %ymm1, %ymm0
 ; X64-NEXT:    sete %al
@@ -3612,56 +3612,56 @@ declare i32 @llvm.x86.avx.ptestz.256(<4
 
 define <2 x double> @test_mm_undefined_pd() nounwind {
 ; X32-LABEL: test_mm_undefined_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_undefined_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <2 x double> undef
 }
 
 define <4 x double> @test_mm256_undefined_pd() nounwind {
 ; X32-LABEL: test_mm256_undefined_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_undefined_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <4 x double> undef
 }
 
 define <8 x float> @test_mm256_undefined_ps() nounwind {
 ; X32-LABEL: test_mm256_undefined_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_undefined_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <8 x float> undef
 }
 
 define <4 x i64> @test_mm256_undefined_si256() nounwind {
 ; X32-LABEL: test_mm256_undefined_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_undefined_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    retq
   ret <4 x i64> undef
 }
 
 define <4 x double> @test_mm256_unpackhi_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_unpackhi_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_unpackhi_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -3670,12 +3670,12 @@ define <4 x double> @test_mm256_unpackhi
 
 define <8 x float> @test_mm256_unpackhi_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_unpackhi_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_unpackhi_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -3684,12 +3684,12 @@ define <8 x float> @test_mm256_unpackhi_
 
 define <4 x double> @test_mm256_unpacklo_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_unpacklo_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_unpacklo_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -3698,12 +3698,12 @@ define <4 x double> @test_mm256_unpacklo
 
 define <8 x float> @test_mm256_unpacklo_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_unpacklo_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_unpacklo_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -3712,12 +3712,12 @@ define <8 x float> @test_mm256_unpacklo_
 
 define <4 x double> @test_mm256_xor_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
 ; X32-LABEL: test_mm256_xor_pd:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_xor_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <4 x double> %a0 to <4 x i64>
@@ -3729,12 +3729,12 @@ define <4 x double> @test_mm256_xor_pd(<
 
 define <8 x float> @test_mm256_xor_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
 ; X32-LABEL: test_mm256_xor_ps:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_xor_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = bitcast <8 x float> %a0 to <8 x i32>
@@ -3746,12 +3746,12 @@ define <8 x float> @test_mm256_xor_ps(<8
 
 define void @test_mm256_zeroall() nounwind {
 ; X32-LABEL: test_mm256_zeroall:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vzeroall
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zeroall:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vzeroall
 ; X64-NEXT:    retq
   call void @llvm.x86.avx.vzeroall()
@@ -3761,12 +3761,12 @@ declare void @llvm.x86.avx.vzeroall() no
 
 define void @test_mm256_zeroupper() nounwind {
 ; X32-LABEL: test_mm256_zeroupper:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zeroupper:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
   call void @llvm.x86.avx.vzeroupper()
@@ -3776,12 +3776,12 @@ declare void @llvm.x86.avx.vzeroupper()
 
 define <4 x double> @test_mm256_zextpd128_pd256(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_zextpd128_pd256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zextpd128_pd256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -3790,12 +3790,12 @@ define <4 x double> @test_mm256_zextpd12
 
 define <8 x float> @test_mm256_zextps128_ps256(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_zextps128_ps256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zextps128_ps256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -3804,12 +3804,12 @@ define <8 x float> @test_mm256_zextps128
 
 define <4 x i64> @test_mm256_zextsi128_si256(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm256_zextsi128_si256:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    vmovaps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zextsi128_si256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>

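The hunks above are all the same one-token substitution in autogenerated FileCheck lines: the basic-block comment that llc prints for the entry block is now matched as `# %bb.0:` rather than `# BB#0:`. As a minimal sketch of what a freshly written test against the new output looks like (hypothetical file and function name, x86-64 triple assumed):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; Returning a constant zero lowers to a self-xor of %eax; the entry
; block comment in the asm output now reads "%bb.0".
define i32 @ret_zero() nounwind {
; CHECK-LABEL: ret_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  ret i32 0
}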
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <4 x double> @test_x86_avx_vinsertf128_pd_256_1(<4 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vinsertf128_pd_256_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1)
@@ -16,7 +16,7 @@ declare <4 x double> @llvm.x86.avx.vinse
 
 define <8 x float> @test_x86_avx_vinsertf128_ps_256_1(<8 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vinsertf128_ps_256_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1)
@@ -26,7 +26,7 @@ declare <8 x float> @llvm.x86.avx.vinser
 
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1)
@@ -38,7 +38,7 @@ define <8 x i32> @test_x86_avx_vinsertf1
 ; not a vinsertf128 $1.
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
@@ -51,7 +51,7 @@ declare <8 x i32> @llvm.x86.avx.vinsertf
 
 define <2 x double> @test_x86_avx_vextractf128_pd_256_1(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_vextractf128_pd_256_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
@@ -62,7 +62,7 @@ declare <2 x double> @llvm.x86.avx.vextr
 
 define <4 x float> @test_x86_avx_vextractf128_ps_256_1(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_vextractf128_ps_256_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
@@ -73,7 +73,7 @@ declare <4 x float> @llvm.x86.avx.vextra
 
 define <4 x i32> @test_x86_avx_vextractf128_si_256_1(<8 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx_vextractf128_si_256_1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
@@ -87,7 +87,7 @@ declare <4 x i32> @llvm.x86.avx.vextract
 ; not a vextractf128 of any kind.
 define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
@@ -98,13 +98,13 @@ define <2 x double> @test_x86_avx_extrac
 
 define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
 ; X86-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
@@ -115,13 +115,13 @@ declare <4 x double> @llvm.x86.avx.vbroa
 
 define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
 ; X86-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
@@ -132,7 +132,7 @@ declare <8 x float> @llvm.x86.avx.vbroad
 
 define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_blend_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <<4 x double>> [#uses=1]
@@ -143,7 +143,7 @@ declare <4 x double> @llvm.x86.avx.blend
 
 define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_blend_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
@@ -154,7 +154,7 @@ declare <8 x float> @llvm.x86.avx.blend.
 
 define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_dp_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
@@ -165,7 +165,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 
 define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
 ; CHECK-LABEL: test_x86_sse2_psll_dq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -176,7 +176,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
 ; CHECK-LABEL: test_x86_sse2_psrl_dq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
@@ -187,7 +187,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq
 
 define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_sse41_blendpd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
@@ -198,7 +198,7 @@ declare <2 x double> @llvm.x86.sse41.ble
 
 define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_sse41_blendps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -209,7 +209,7 @@ declare <4 x float> @llvm.x86.sse41.blen
 
 define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
 ; CHECK-LABEL: test_x86_sse41_pblendw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7) ; <<8 x i16>> [#uses=1]
@@ -220,7 +220,7 @@ declare <8 x i16> @llvm.x86.sse41.pblend
 
 define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxbd %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -231,7 +231,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxbq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -242,7 +242,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -253,7 +253,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovsx
 
 define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -264,7 +264,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -275,7 +275,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxwq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
@@ -286,7 +286,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovzxbd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -297,7 +297,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovzx
 
 define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovzxbq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -308,7 +308,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovzxbw:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -319,7 +319,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovzx
 
 define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovzxdq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -330,7 +330,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovzxwd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -341,7 +341,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovzx
 
 define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovzxwq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
@@ -352,7 +352,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovzx
 
 define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvtdq2pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
@@ -363,7 +363,7 @@ declare <2 x double> @llvm.x86.sse2.cvtd
 
 define <4 x double> @test_x86_avx_cvtdq2_pd_256(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx_cvtdq2_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %a0) ; <<4 x double>> [#uses=1]
@@ -374,7 +374,7 @@ declare <4 x double> @llvm.x86.avx.cvtdq
 
 define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_sse2_cvtps2pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtps2pd %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
@@ -385,7 +385,7 @@ declare <2 x double> @llvm.x86.sse2.cvtp
 
 define <4 x double> @test_x86_avx_cvt_ps2_pd_256(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_cvt_ps2_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtps2pd %xmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float> %a0) ; <<4 x double>> [#uses=1]
@@ -397,7 +397,7 @@ declare <4 x double> @llvm.x86.avx.cvt.p
 define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
   ; add operation forces the execution domain.
 ; X86-LABEL: test_x86_sse2_storeu_dq:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
@@ -405,7 +405,7 @@ define void @test_x86_sse2_storeu_dq(i8*
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_sse2_storeu_dq:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovdqu %xmm0, (%rdi)
@@ -420,7 +420,7 @@ declare void @llvm.x86.sse2.storeu.dq(i8
 define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
   ; fadd operation forces the execution domain.
 ; X86-LABEL: test_x86_sse2_storeu_pd:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
@@ -429,7 +429,7 @@ define void @test_x86_sse2_storeu_pd(i8*
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_sse2_storeu_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; X64-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
@@ -444,13 +444,13 @@ declare void @llvm.x86.sse2.storeu.pd(i8
 
 define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) {
 ; X86-LABEL: test_x86_sse_storeu_ps:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovups %xmm0, (%eax)
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_sse_storeu_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %xmm0, (%rdi)
 ; X64-NEXT:    ret{{[l|q]}}
   call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
@@ -463,7 +463,7 @@ define void @test_x86_avx_storeu_dq_256(
   ; FIXME: unfortunately the execution domain fix pass changes this to vmovups and it's hard to force with no 256-bit integer instructions
   ; add operation forces the execution domain.
 ; X86-LABEL: test_x86_avx_storeu_dq_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -475,7 +475,7 @@ define void @test_x86_avx_storeu_dq_256(
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_storeu_dq_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; X64-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
@@ -494,7 +494,7 @@ declare void @llvm.x86.avx.storeu.dq.256
 define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) {
   ; add operation forces the execution domain.
 ; X86-LABEL: test_x86_avx_storeu_pd_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -503,7 +503,7 @@ define void @test_x86_avx_storeu_pd_256(
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_storeu_pd_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; X64-NEXT:    vmovupd %ymm0, (%rdi)
@@ -518,14 +518,14 @@ declare void @llvm.x86.avx.storeu.pd.256
 
 define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
 ; X86-LABEL: test_x86_avx_storeu_ps_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    vmovups %ymm0, (%eax)
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    ret{{[l|q]}}
 ;
 ; X64-LABEL: test_x86_avx_storeu_ps_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovups %ymm0, (%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    ret{{[l|q]}}
@@ -537,7 +537,7 @@ declare void @llvm.x86.avx.storeu.ps.256
 
 define <2 x double> @test_x86_avx_vpermil_pd(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_vpermil_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double> %a0, i8 1) ; <<2 x double>> [#uses=1]
@@ -548,7 +548,7 @@ declare <2 x double> @llvm.x86.avx.vperm
 
 define <4 x double> @test_x86_avx_vpermil_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_vpermil_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double> %a0, i8 7) ; <<4 x double>> [#uses=1]
@@ -559,7 +559,7 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 define <4 x float> @test_x86_avx_vpermil_ps(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_vpermil_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float> %a0, i8 7) ; <<4 x float>> [#uses=1]
@@ -570,7 +570,7 @@ declare <4 x float> @llvm.x86.avx.vpermi
 
 define <8 x float> @test_x86_avx_vpermil_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_vpermil_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float> %a0, i8 7) ; <<8 x float>> [#uses=1]
@@ -581,7 +581,7 @@ declare <8 x float> @llvm.x86.avx.vpermi
 
 define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vperm2f128_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 3) ; <<4 x double>> [#uses=1]
@@ -592,7 +592,7 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 define <8 x float> @test_x86_avx_vperm2f128_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vperm2f128_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 3) ; <<8 x float>> [#uses=1]
@@ -603,7 +603,7 @@ declare <8 x float> @llvm.x86.avx.vperm2
 
 define <8 x i32> @test_x86_avx_vperm2f128_si_256(<8 x i32> %a0, <8 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx_vperm2f128_si_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %a0, <8 x i32> %a1, i8 3) ; <<8 x i32>> [#uses=1]

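The file above also illustrates the shared-prefix pattern used throughout these tests: when 32- and 64-bit codegen differ only in the return instruction, a single CHECK body with a regex covers both RUN lines. A minimal sketch (hypothetical test and function name, AVX feature flag assumed):

; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

; A zeroed vector lowers to a self-xor on both targets; only the
; return mnemonic (retl vs. retq) differs, so one regex matches both.
define <4 x float> @zero_ps() nounwind {
; CHECK-LABEL: zero_ps:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
  ret <4 x float> zeroinitializer
}

In practice, files with autogenerated assertions like these are presumably refreshed by rerunning utils/update_llc_test_checks.py rather than edited by hand, which rewrites every CHECK line against the current llc output in one pass.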
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Mon Dec  4 09:18:51 2017
@@ -6,7 +6,7 @@
 
 define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_addsub_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd0,0xc1]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -17,7 +17,7 @@ declare <4 x double> @llvm.x86.avx.addsu
 
 define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_addsub_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0xd0,0xc1]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -28,7 +28,7 @@ declare <8 x float> @llvm.x86.avx.addsub
 
 define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
 ; CHECK-LABEL: test_x86_avx_blendv_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4b,0xc1,0x20]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) ; <<4 x double>> [#uses=1]
@@ -39,7 +39,7 @@ declare <4 x double> @llvm.x86.avx.blend
 
 define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
 ; CHECK-LABEL: test_x86_avx_blendv_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4a,0xc1,0x20]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) ; <<8 x float>> [#uses=1]
@@ -50,7 +50,7 @@ declare <8 x float> @llvm.x86.avx.blendv
 
 define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_cmp_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpordpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xc2,0xc1,0x07]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 7) ; <<4 x double>> [#uses=1]
@@ -61,7 +61,7 @@ declare <4 x double> @llvm.x86.avx.cmp.p
 
 define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_cmp_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpordps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0xc2,0xc1,0x07]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
@@ -70,7 +70,7 @@ define <8 x float> @test_x86_avx_cmp_ps_
 
 define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_cmp_ps_256_pseudo_op:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x00]
 ; CHECK-NEXT:    vcmpltps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x01]
 ; CHECK-NEXT:    vcmpleps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x02]
@@ -143,13 +143,13 @@ declare <8 x float> @llvm.x86.avx.cmp.ps
 
 define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) {
 ; AVX-LABEL: test_x86_avx_cvt_pd2_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvtpd2ps %ymm0, %xmm0 # encoding: [0xc5,0xfd,0x5a,0xc0]
 ; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvt_pd2_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vcvtpd2ps %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0]
 ; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -161,13 +161,13 @@ declare <4 x float> @llvm.x86.avx.cvt.pd
 
 define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) {
 ; AVX-LABEL: test_x86_avx_cvt_pd2dq_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvtpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xff,0xe6,0xc0]
 ; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvt_pd2dq_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vcvtpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0]
 ; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -179,7 +179,7 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2d
 
 define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_cvt_ps2dq_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5b,0xc0]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
@@ -190,12 +190,12 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2d
 
 define <8 x float> @test_x86_avx_cvtdq2_ps_256(<8 x i32> %a0) {
 ; AVX-LABEL: test_x86_avx_cvtdq2_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvtdq2ps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5b,0xc0]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvtdq2_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vcvtdq2ps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32> %a0) ; <<8 x float>> [#uses=1]
@@ -206,13 +206,13 @@ declare <8 x float> @llvm.x86.avx.cvtdq2
 
 define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) {
 ; AVX-LABEL: test_x86_avx_cvtt_pd2dq_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xfd,0xe6,0xc0]
 ; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvtt_pd2dq_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vcvttpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0]
 ; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -224,12 +224,12 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2
 
 define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) {
 ; AVX-LABEL: test_x86_avx_cvtt_ps2dq_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfe,0x5b,0xc0]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_cvtt_ps2dq_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vcvttps2dq %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfe,0x5b,0xc0]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
@@ -240,7 +240,7 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2
 
 define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_dp_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x40,0xc1,0x07]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
@@ -251,7 +251,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 
 define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_hadd_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7c,0xc1]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -262,7 +262,7 @@ declare <4 x double> @llvm.x86.avx.hadd.
 
 define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_hadd_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7c,0xc1]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -273,7 +273,7 @@ declare <8 x float> @llvm.x86.avx.hadd.p
 
 define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_hsub_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7d,0xc1]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -284,7 +284,7 @@ declare <4 x double> @llvm.x86.avx.hsub.
 
 define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_hsub_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7d,0xc1]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -295,13 +295,13 @@ declare <8 x float> @llvm.x86.avx.hsub.p
 
 define <32 x i8> @test_x86_avx_ldu_dq_256(i8* %a0) {
 ; X86-LABEL: test_x86_avx_ldu_dq_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vlddqu (%eax), %ymm0 # encoding: [0xc5,0xff,0xf0,0x00]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_ldu_dq_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vlddqu (%rdi), %ymm0 # encoding: [0xc5,0xff,0xf0,0x07]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ; <<32 x i8>> [#uses=1]
@@ -312,13 +312,13 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.2
 
 define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) {
 ; X86-LABEL: test_x86_avx_maskload_pd:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovpd (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x00]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskload_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x07]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1]
@@ -329,13 +329,13 @@ declare <2 x double> @llvm.x86.avx.maskl
 
 define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) {
 ; X86-LABEL: test_x86_avx_maskload_pd_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovpd (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x00]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskload_pd_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x07]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1]
@@ -346,13 +346,13 @@ declare <4 x double> @llvm.x86.avx.maskl
 
 define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) {
 ; X86-LABEL: test_x86_avx_maskload_ps:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x00]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskload_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x07]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1]
@@ -363,13 +363,13 @@ declare <4 x float> @llvm.x86.avx.masklo
 
 define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) {
 ; X86-LABEL: test_x86_avx_maskload_ps_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovps (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x00]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskload_ps_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x07]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1]
@@ -380,13 +380,13 @@ declare <8 x float> @llvm.x86.avx.masklo
 
 define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) {
 ; X86-LABEL: test_x86_avx_maskstore_pd:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovpd %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2f,0x08]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskstore_pd:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x2f,0x0f]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2)
@@ -397,14 +397,14 @@ declare void @llvm.x86.avx.maskstore.pd(
 
 define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) {
 ; X86-LABEL: test_x86_avx_maskstore_pd_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovpd %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskstore_pd_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x2f,0x0f]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -416,13 +416,13 @@ declare void @llvm.x86.avx.maskstore.pd.
 
 define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) {
 ; X86-LABEL: test_x86_avx_maskstore_ps:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovps %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2e,0x08]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskstore_ps:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # encoding: [0xc4,0xe2,0x79,0x2e,0x0f]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2)
@@ -433,14 +433,14 @@ declare void @llvm.x86.avx.maskstore.ps(
 
 define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) {
 ; X86-LABEL: test_x86_avx_maskstore_ps_256:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-NEXT:    vmaskmovps %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
 ; X86-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-LABEL: test_x86_avx_maskstore_ps_256:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # encoding: [0xc4,0xe2,0x7d,0x2e,0x0f]
 ; X64-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -452,12 +452,12 @@ declare void @llvm.x86.avx.maskstore.ps.
 
 define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; AVX-LABEL: test_x86_avx_max_pd_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5f,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_max_pd_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5f,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -468,12 +468,12 @@ declare <4 x double> @llvm.x86.avx.max.p
 
 define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; AVX-LABEL: test_x86_avx_max_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5f,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_max_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -484,12 +484,12 @@ declare <8 x float> @llvm.x86.avx.max.ps
 
 define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; AVX-LABEL: test_x86_avx_min_pd_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5d,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_min_pd_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5d,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -500,12 +500,12 @@ declare <4 x double> @llvm.x86.avx.min.p
 
 define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; AVX-LABEL: test_x86_avx_min_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5d,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_min_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vminps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -516,7 +516,7 @@ declare <8 x float> @llvm.x86.avx.min.ps
 
 define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx_movmsk_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovmskpd %ymm0, %eax # encoding: [0xc5,0xfd,0x50,0xc0]
 ; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -528,7 +528,7 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(
 
 define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_movmsk_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovmskps %ymm0, %eax # encoding: [0xc5,0xfc,0x50,0xc0]
 ; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -540,7 +540,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(
 
 define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx_ptestc_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
 ; CHECK-NEXT:    setb %al # encoding: [0x0f,0x92,0xc0]
@@ -554,7 +554,7 @@ declare i32 @llvm.x86.avx.ptestc.256(<4
 
 define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx_ptestnzc_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
 ; CHECK-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
@@ -568,7 +568,7 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<
 
 define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx_ptestz_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -582,7 +582,7 @@ declare i32 @llvm.x86.avx.ptestz.256(<4
 
 define <8 x float> @test_x86_avx_rcp_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_rcp_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrcpps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x53,0xc0]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -593,12 +593,12 @@ declare <8 x float> @llvm.x86.avx.rcp.ps
 
 define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) {
 ; AVX-LABEL: test_x86_avx_round_pd_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vroundpd $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x09,0xc0,0x07]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_round_pd_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vrndscalepd $7, %ymm0, %ymm0 # encoding: [0x62,0xf3,0xfd,0x28,0x09,0xc0,0x07]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) ; <<4 x double>> [#uses=1]
@@ -609,12 +609,12 @@ declare <4 x double> @llvm.x86.avx.round
 
 define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) {
 ; AVX-LABEL: test_x86_avx_round_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vroundps $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x08,0xc0,0x07]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_round_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vrndscaleps $7, %ymm0, %ymm0 # encoding: [0x62,0xf3,0x7d,0x28,0x08,0xc0,0x07]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) ; <<8 x float>> [#uses=1]
@@ -625,7 +625,7 @@ declare <8 x float> @llvm.x86.avx.round.
 
 define <8 x float> @test_x86_avx_rsqrt_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx_rsqrt_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x52,0xc0]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -636,12 +636,12 @@ declare <8 x float> @llvm.x86.avx.rsqrt.
 
 define <4 x double> @test_x86_avx_sqrt_pd_256(<4 x double> %a0) {
 ; AVX-LABEL: test_x86_avx_sqrt_pd_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtpd %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x51,0xc0]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_sqrt_pd_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vsqrtpd %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x51,0xc0]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) ; <<4 x double>> [#uses=1]
@@ -652,12 +652,12 @@ declare <4 x double> @llvm.x86.avx.sqrt.
 
 define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) {
 ; AVX-LABEL: test_x86_avx_sqrt_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x51,0xc0]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_sqrt_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vsqrtps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x51,0xc0]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -668,12 +668,12 @@ declare <8 x float> @llvm.x86.avx.sqrt.p
 
 define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) {
 ; AVX-LABEL: test_x86_avx_vpermilvar_pd:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0d,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0d,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) ; <<2 x double>> [#uses=1]
@@ -684,12 +684,12 @@ declare <2 x double> @llvm.x86.avx.vperm
 
 define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) {
 ; AVX-LABEL: test_x86_avx_vpermilvar_pd_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0d,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0d,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) ; <<4 x double>> [#uses=1]
@@ -699,13 +699,13 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) {
 ; AVX-LABEL: test_x86_avx_vpermilvar_pd_256_2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd $9, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09]
 ; AVX-NEXT:    # ymm0 = ymm0[1,0,2,3]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256_2:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpermilpd $9, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09]
 ; AVX512VL-NEXT:    # ymm0 = ymm0[1,0,2,3]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -715,12 +715,12 @@ define <4 x double> @test_x86_avx_vpermi
 
 define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) {
 ; AVX-LABEL: test_x86_avx_vpermilvar_ps:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) ; <<4 x float>> [#uses=1]
@@ -728,24 +728,24 @@ define <4 x float> @test_x86_avx_vpermil
 }
 define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) {
 ; X86-AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X86-AVX:       # BB#0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX-NEXT:    vpermilps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x00]
 ; X86-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X86-AVX512VL:       # BB#0:
+; X86-AVX512VL:       # %bb.0:
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512VL-NEXT:    vpermilps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x00]
 ; X86-AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x07]
 ; X64-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x07]
 ; X64-AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %a2 = load <4 x i32>, <4 x i32>* %a1
@@ -757,12 +757,12 @@ declare <4 x float> @llvm.x86.avx.vpermi
 
 define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) {
 ; AVX-LABEL: test_x86_avx_vpermilvar_ps_256:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0c,0xc1]
 ; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_256:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0c,0xc1]
 ; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
@@ -773,7 +773,7 @@ declare <8 x float> @llvm.x86.avx.vpermi
 
 define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestc_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
 ; CHECK-NEXT:    setb %al # encoding: [0x0f,0x92,0xc0]
@@ -786,7 +786,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x
 
 define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestc_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
 ; CHECK-NEXT:    setb %al # encoding: [0x0f,0x92,0xc0]
@@ -800,7 +800,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(
 
 define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestc_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
 ; CHECK-NEXT:    setb %al # encoding: [0x0f,0x92,0xc0]
@@ -813,7 +813,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x
 
 define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestc_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
 ; CHECK-NEXT:    setb %al # encoding: [0x0f,0x92,0xc0]
@@ -827,7 +827,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(
 
 define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestnzc_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
 ; CHECK-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
@@ -840,7 +840,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2
 
 define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestnzc_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
 ; CHECK-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
@@ -854,7 +854,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.25
 
 define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestnzc_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
 ; CHECK-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
@@ -867,7 +867,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4
 
 define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestnzc_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
 ; CHECK-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
@@ -881,7 +881,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.25
 
 define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestz_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -894,7 +894,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x
 
 define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestz_pd_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -908,7 +908,7 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(
 
 define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestz_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -921,7 +921,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x
 
 define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx_vtestz_ps_256:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK-NEXT:    vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
 ; CHECK-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
@@ -935,7 +935,7 @@ declare i32 @llvm.x86.avx.vtestz.ps.256(
 
 define void @test_x86_avx_vzeroall() {
 ; CHECK-LABEL: test_x86_avx_vzeroall:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vzeroall # encoding: [0xc5,0xfc,0x77]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   call void @llvm.x86.avx.vzeroall()
@@ -946,7 +946,7 @@ declare void @llvm.x86.avx.vzeroall() no
 
 define void @test_x86_avx_vzeroupper() {
 ; CHECK-LABEL: test_x86_avx_vzeroupper:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   call void @llvm.x86.avx.vzeroupper()
@@ -956,7 +956,7 @@ declare void @llvm.x86.avx.vzeroupper()
 
 define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
 ; X86-AVX-LABEL: movnt_dq:
-; X86-AVX:       # BB#0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
 ; X86-AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
@@ -965,7 +965,7 @@ define void @movnt_dq(i8* %p, <2 x i64>
 ; X86-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: movnt_dq:
-; X86-AVX512VL:       # BB#0:
+; X86-AVX512VL:       # %bb.0:
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
 ; X86-AVX512VL-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
@@ -974,7 +974,7 @@ define void @movnt_dq(i8* %p, <2 x i64>
 ; X86-AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: movnt_dq:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
 ; X64-AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
 ; X64-AVX-NEXT:    vmovntdq %ymm0, (%rdi) # encoding: [0xc5,0xfd,0xe7,0x07]
@@ -982,7 +982,7 @@ define void @movnt_dq(i8* %p, <2 x i64>
 ; X64-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: movnt_dq:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9]
 ; X64-AVX512VL-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
 ; X64-AVX512VL-NEXT:    vmovntdq %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x07]
@@ -997,27 +997,27 @@ declare void @llvm.x86.avx.movnt.dq.256(
 
 define void @movnt_ps(i8* %p, <8 x float> %a) nounwind {
 ; X86-AVX-LABEL: movnt_ps:
-; X86-AVX:       # BB#0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX-NEXT:    vmovntps %ymm0, (%eax) # encoding: [0xc5,0xfc,0x2b,0x00]
 ; X86-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: movnt_ps:
-; X86-AVX512VL:       # BB#0:
+; X86-AVX512VL:       # %bb.0:
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512VL-NEXT:    vmovntps %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00]
 ; X86-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X86-AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: movnt_ps:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vmovntps %ymm0, (%rdi) # encoding: [0xc5,0xfc,0x2b,0x07]
 ; X64-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: movnt_ps:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vmovntps %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x07]
 ; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
@@ -1029,7 +1029,7 @@ declare void @llvm.x86.avx.movnt.ps.256(
 define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
   ; add operation forces the execution domain.
 ; X86-AVX-LABEL: movnt_pd:
-; X86-AVX:       # BB#0:
+; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
 ; X86-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
@@ -1038,7 +1038,7 @@ define void @movnt_pd(i8* %p, <4 x doubl
 ; X86-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: movnt_pd:
-; X86-AVX512VL:       # BB#0:
+; X86-AVX512VL:       # %bb.0:
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512VL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
 ; X86-AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
@@ -1047,7 +1047,7 @@ define void @movnt_pd(i8* %p, <4 x doubl
 ; X86-AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: movnt_pd:
-; X64-AVX:       # BB#0:
+; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
 ; X64-AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1]
 ; X64-AVX-NEXT:    vmovntpd %ymm0, (%rdi) # encoding: [0xc5,0xfd,0x2b,0x07]
@@ -1055,7 +1055,7 @@ define void @movnt_pd(i8* %p, <4 x doubl
 ; X64-AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: movnt_pd:
-; X64-AVX512VL:       # BB#0:
+; X64-AVX512VL:       # %bb.0:
 ; X64-AVX512VL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
 ; X64-AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
 ; X64-AVX512VL-NEXT:    vmovntpd %ymm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x07]
@@ -1071,7 +1071,7 @@ declare void @llvm.x86.avx.movnt.pd.256(
 ; Check for pclmulqdq
 define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_pclmulqdq:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x44,0xc1,0x00]
 ; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ; <<2 x i64>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86_64.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
 ; AVX-LABEL: test_x86_avx_vzeroall:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
 ; AVX-NEXT:    vzeroall
@@ -12,7 +12,7 @@ define <4 x double> @test_x86_avx_vzeroa
 ; AVX-NEXT:    ret{{[l|q]}}
 ;
 ; AVX512VL-LABEL: test_x86_avx_vzeroall:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm16
 ; AVX512VL-NEXT:    vzeroall
 ; AVX512VL-NEXT:    vmovapd %ymm16, %ymm0
@@ -25,7 +25,7 @@ declare void @llvm.x86.avx.vzeroall() no
 
 define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
 ; AVX-LABEL: test_x86_avx_vzeroupper:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
 ; AVX-NEXT:    vzeroupper
@@ -33,7 +33,7 @@ define <4 x double> @test_x86_avx_vzerou
 ; AVX-NEXT:    ret{{[l|q]}}
 ;
 ; AVX512VL-LABEL: test_x86_avx_vzeroupper:
-; AVX512VL:       # BB#0:
+; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vaddpd %ymm1, %ymm0, %ymm16
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    vmovapd %ymm16, %ymm0

Modified: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-load-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
 ; CHECK-LABEL: test_256_load:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %r15
 ; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    pushq %rbx
@@ -33,7 +33,7 @@ define void @test_256_load(double* nocap
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: test_256_load:
-; CHECK_O0:       # BB#0: # %entry
+; CHECK_O0:       # %bb.0: # %entry
 ; CHECK_O0-NEXT:    subq $152, %rsp
 ; CHECK_O0-NEXT:    vmovapd (%rdi), %ymm0
 ; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm1
@@ -78,12 +78,12 @@ declare void @dummy(<4 x double>, <8 x f
 
 define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
 ; CHECK-LABEL: mov00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: mov00:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK_O0-NEXT:    # implicit-def: %ymm1
 ; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm1
@@ -97,12 +97,12 @@ define <8 x float> @mov00(<8 x float> %v
 
 define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
 ; CHECK-LABEL: mov01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: mov01:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK_O0-NEXT:    # implicit-def: %ymm1
 ; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm1
@@ -116,11 +116,11 @@ define <4 x double> @mov01(<4 x double>
 
 define void @storev16i16(<16 x i16> %a) nounwind {
 ; CHECK-LABEL: storev16i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %ymm0, (%rax)
 ;
 ; CHECK_O0-LABEL: storev16i16:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    # implicit-def: %rax
 ; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
   store <16 x i16> %a, <16 x i16>* undef, align 32
@@ -129,12 +129,12 @@ define void @storev16i16(<16 x i16> %a)
 
 define void @storev16i16_01(<16 x i16> %a) nounwind {
 ; CHECK-LABEL: storev16i16_01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
 ; CHECK-NEXT:    vmovups %xmm0, (%rax)
 ;
 ; CHECK_O0-LABEL: storev16i16_01:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    # implicit-def: %rax
 ; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
   store <16 x i16> %a, <16 x i16>* undef, align 4
@@ -143,11 +143,11 @@ define void @storev16i16_01(<16 x i16> %
 
 define void @storev32i8(<32 x i8> %a) nounwind {
 ; CHECK-LABEL: storev32i8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %ymm0, (%rax)
 ;
 ; CHECK_O0-LABEL: storev32i8:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    # implicit-def: %rax
 ; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
   store <32 x i8> %a, <32 x i8>* undef, align 32
@@ -156,12 +156,12 @@ define void @storev32i8(<32 x i8> %a) no
 
 define void @storev32i8_01(<32 x i8> %a) nounwind {
 ; CHECK-LABEL: storev32i8_01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
 ; CHECK-NEXT:    vmovups %xmm0, (%rax)
 ;
 ; CHECK_O0-LABEL: storev32i8_01:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    # implicit-def: %rax
 ; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
   store <32 x i8> %a, <32 x i8>* undef, align 4
@@ -172,13 +172,13 @@ define void @storev32i8_01(<32 x i8> %a)
 ; example, after making an integer operation.
 define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
 ; CHECK-LABEL: double_save:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: double_save:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    # implicit-def: %ymm2
 ; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
 ; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm2
@@ -194,23 +194,23 @@ declare void @llvm.x86.avx.maskstore.ps.
 
 define void @f_f() nounwind {
 ; CHECK-LABEL: f_f:
-; CHECK:       # BB#0: # %allocas
+; CHECK:       # %bb.0: # %allocas
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB8_2
-; CHECK-NEXT:  # BB#1: # %cif_mask_all
+; CHECK-NEXT:  # %bb.1: # %cif_mask_all
 ; CHECK-NEXT:  .LBB8_2: # %cif_mask_mixed
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB8_4
-; CHECK-NEXT:  # BB#3: # %cif_mixed_test_all
+; CHECK-NEXT:  # %bb.3: # %cif_mixed_test_all
 ; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    vmovd %eax, %xmm0
 ; CHECK-NEXT:    vmaskmovps %ymm0, %ymm0, (%rax)
 ; CHECK-NEXT:  .LBB8_4: # %cif_mixed_test_any_check
 ;
 ; CHECK_O0-LABEL: f_f:
-; CHECK_O0:       # BB#0: # %allocas
+; CHECK_O0:       # %bb.0: # %allocas
 ; CHECK_O0-NEXT:    # implicit-def: %al
 ; CHECK_O0-NEXT:    testb $1, %al
 ; CHECK_O0-NEXT:    jne .LBB8_1
@@ -248,7 +248,7 @@ cif_mixed_test_any_check:
 
 define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
 ; CHECK-LABEL: add8i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovups (%rsi), %xmm0
 ; CHECK-NEXT:    vmovups 16(%rsi), %xmm1
 ; CHECK-NEXT:    vmovups %xmm1, 16(%rdi)
@@ -256,7 +256,7 @@ define void @add8i32(<8 x i32>* %ret, <8
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: add8i32:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    vmovdqu (%rsi), %xmm0
 ; CHECK_O0-NEXT:    vmovdqu 16(%rsi), %xmm1
 ; CHECK_O0-NEXT:    # implicit-def: %ymm2
@@ -273,14 +273,14 @@ define void @add8i32(<8 x i32>* %ret, <8
 
 define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
 ; CHECK-LABEL: add4i64a64:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rsi), %ymm0
 ; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: add4i64a64:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm0
 ; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rdi)
 ; CHECK_O0-NEXT:    vzeroupper
@@ -293,7 +293,7 @@ define void @add4i64a64(<4 x i64>* %ret,
 
 define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
 ; CHECK-LABEL: add4i64a16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rsi), %xmm0
 ; CHECK-NEXT:    vmovaps 16(%rsi), %xmm1
 ; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
@@ -301,7 +301,7 @@ define void @add4i64a16(<4 x i64>* %ret,
 ; CHECK-NEXT:    retq
 ;
 ; CHECK_O0-LABEL: add4i64a16:
-; CHECK_O0:       # BB#0:
+; CHECK_O0:       # %bb.0:
 ; CHECK_O0-NEXT:    vmovdqa (%rsi), %xmm0
 ; CHECK_O0-NEXT:    vmovdqa 16(%rsi), %xmm1
 ; CHECK_O0-NEXT:    # implicit-def: %ymm2

Modified: llvm/trunk/test/CodeGen/X86/avx-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-logic.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-logic.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x double> @andpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: andpd256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -21,7 +21,7 @@ entry:
 
 define <4 x double> @andpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: andpd256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandpd {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -37,7 +37,7 @@ entry:
 
 define <8 x float> @andps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: andps256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -50,7 +50,7 @@ entry:
 
 define <8 x float> @andps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: andps256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -62,7 +62,7 @@ entry:
 
 define <4 x double> @xorpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: xorpd256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vxorpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -79,7 +79,7 @@ entry:
 
 define <4 x double> @xorpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: xorpd256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vxorpd {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -95,7 +95,7 @@ entry:
 
 define <8 x float> @xorps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: xorps256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vxorps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -108,7 +108,7 @@ entry:
 
 define <8 x float> @xorps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: xorps256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vxorps {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -120,7 +120,7 @@ entry:
 
 define <4 x double> @orpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: orpd256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vorpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -137,7 +137,7 @@ entry:
 
 define <4 x double> @orpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: orpd256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vorpd {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -153,7 +153,7 @@ entry:
 
 define <8 x float> @orps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: orps256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vorps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -166,7 +166,7 @@ entry:
 
 define <8 x float> @orps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: orps256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -178,7 +178,7 @@ entry:
 
 define <4 x double> @andnotpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: andnotpd256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandnpd %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -196,7 +196,7 @@ entry:
 
 define <4 x double> @andnotpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: andnotpd256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandnpd (%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -215,7 +215,7 @@ entry:
 
 define <8 x float> @andnotps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: andnotps256:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandnps %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -229,7 +229,7 @@ entry:
 
 define <8 x float> @andnotps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: andnotps256fold:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vandnps (%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
 entry:
@@ -246,7 +246,7 @@ entry:
 
 define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: vpandn:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
 ; CHECK-NEXT:    vpandn %xmm0, %xmm1, %xmm0
@@ -261,7 +261,7 @@ entry:
 
 define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: vpand:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -275,12 +275,12 @@ entry:
 
 define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
 ; AVX-LABEL: and_xor_splat1_v4i32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandnps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: and_xor_splat1_v4i32:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
 ; AVX512-NEXT:    vandnps %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -291,12 +291,12 @@ define <4 x i32> @and_xor_splat1_v4i32(<
 
 define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind {
 ; AVX-LABEL: and_xor_splat1_v4i64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: and_xor_splat1_v4i64:
-; AVX512:       # BB#0:
+; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
 ; AVX512-NEXT:    vandnps %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-schedule.ll Mon Dec  4 09:18:51 2017
@@ -11,49 +11,49 @@
 
 define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_addpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_addpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_addpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_addpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_addpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_addpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_addpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -65,49 +65,49 @@ define <4 x double> @test_addpd(<4 x dou
 
 define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_addps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_addps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_addps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_addps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_addps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_addps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_addps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -119,49 +119,49 @@ define <8 x float> @test_addps(<8 x floa
 
 define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_addsubpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_addsubpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_addsubpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_addsubpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_addsubpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_addsubpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_addsubpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addsubpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -174,49 +174,49 @@ declare <4 x double> @llvm.x86.avx.addsu
 
 define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_addsubps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_addsubps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_addsubps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_addsubps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_addsubps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_addsubps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_addsubps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addsubps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -229,56 +229,56 @@ declare <8 x float> @llvm.x86.avx.addsub
 
 define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_andnotpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_andnotpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andnotpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_andnotpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andnotpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_andnotpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andnotpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andnotpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -298,56 +298,56 @@ define <4 x double> @test_andnotpd(<4 x
 
 define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_andnotps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_andnotps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andnotps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_andnotps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andnotps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_andnotps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andnotps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andnotps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -367,56 +367,56 @@ define <8 x float> @test_andnotps(<8 x f
 
 define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_andpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_andpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_andpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_andpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -434,56 +434,56 @@ define <4 x double> @test_andpd(<4 x dou
 
 define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_andps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_andps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_andps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_andps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_andps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_andps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_andps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -501,56 +501,56 @@ define <8 x float> @test_andps(<8 x floa
 
 define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_blendpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_blendpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_blendpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_blendpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [7:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_blendpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_blendpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.33]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_blendpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
@@ -564,49 +564,49 @@ define <4 x double> @test_blendpd(<4 x d
 
 define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_blendps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
 ; GENERIC-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_blendps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
 ; SANDY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_blendps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
 ; HASWELL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_blendps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
 ; BROADWELL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [7:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_blendps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
 ; SKYLAKE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_blendps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.33]
 ; SKX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_blendps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
 ; BTVER2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -618,49 +618,49 @@ define <8 x float> @test_blendps(<8 x fl
 
 define <4 x double> @test_blendvpd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) {
 ; GENERIC-LABEL: test_blendvpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
 ; GENERIC-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_blendvpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
 ; SANDY-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_blendvpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; HASWELL-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_blendvpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BROADWELL-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_blendvpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
 ; SKYLAKE-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_blendvpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
 ; SKX-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_blendvpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
 ; BTVER2-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendvpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -673,49 +673,49 @@ declare <4 x double> @llvm.x86.avx.blend
 
 define <8 x float> @test_blendvps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) {
 ; GENERIC-LABEL: test_blendvps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
 ; GENERIC-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_blendvps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
 ; SANDY-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_blendvps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; HASWELL-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_blendvps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BROADWELL-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_blendvps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
 ; SKYLAKE-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_blendvps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
 ; SKX-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_blendvps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
 ; BTVER2-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendvps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -728,42 +728,42 @@ declare <8 x float> @llvm.x86.avx.blendv
 
 define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
 ; GENERIC-LABEL: test_broadcastf128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_broadcastf128:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_broadcastf128:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_broadcastf128:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_broadcastf128:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_broadcastf128:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_broadcastf128:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastf128:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x float>, <4 x float> *%a0, align 32
@@ -773,42 +773,42 @@ define <8 x float> @test_broadcastf128(<
 
 define <4 x double> @test_broadcastsd_ymm(double *%a0) {
 ; GENERIC-LABEL: test_broadcastsd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_broadcastsd_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_broadcastsd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_broadcastsd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_broadcastsd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_broadcastsd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_broadcastsd_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [6:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastsd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load double, double *%a0, align 8
@@ -819,42 +819,42 @@ define <4 x double> @test_broadcastsd_ym
 
 define <4 x float> @test_broadcastss(float *%a0) {
 ; GENERIC-LABEL: test_broadcastss:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_broadcastss:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_broadcastss:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_broadcastss:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [5:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_broadcastss:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_broadcastss:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [6:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_broadcastss:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastss:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load float, float *%a0, align 4
@@ -865,42 +865,42 @@ define <4 x float> @test_broadcastss(flo
 
 define <8 x float> @test_broadcastss_ymm(float *%a0) {
 ; GENERIC-LABEL: test_broadcastss_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_broadcastss_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_broadcastss_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_broadcastss_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_broadcastss_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_broadcastss_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_broadcastss_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [6:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastss_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load float, float *%a0, align 4
@@ -911,42 +911,42 @@ define <8 x float> @test_broadcastss_ymm
 
 define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_cmppd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cmppd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; SANDY-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cmppd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cmppd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cmppd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cmppd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcmpeqpd %ymm1, %ymm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vcmpeqpd (%rdi), %ymm0, %k1 # sched: [10:1.00]
 ; SKX-NEXT:    vpmovm2q %k0, %ymm0
@@ -955,14 +955,14 @@ define <4 x double> @test_cmppd(<4 x dou
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cmppd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
 ; BTVER2-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cmppd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -979,42 +979,42 @@ define <4 x double> @test_cmppd(<4 x dou
 
 define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_cmpps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cmpps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; SANDY-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cmpps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cmpps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cmpps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cmpps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcmpeqps %ymm1, %ymm0, %k0 # sched: [3:1.00]
 ; SKX-NEXT:    vcmpeqps (%rdi), %ymm0, %k1 # sched: [10:1.00]
 ; SKX-NEXT:    vpmovm2d %k0, %ymm0
@@ -1023,14 +1023,14 @@ define <8 x float> @test_cmpps(<8 x floa
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cmpps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
 ; BTVER2-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cmpps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -1047,56 +1047,56 @@ define <8 x float> @test_cmpps(<8 x floa
 
 define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
 ; GENERIC-LABEL: test_cvtdq2pd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [10:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvtdq2pd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [4:1.00]
 ; SANDY-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtdq2pd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [6:1.00]
 ; HASWELL-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [6:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtdq2pd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [11:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtdq2pd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [13:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtdq2pd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [13:1.00]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtdq2pd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtdq2pd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -1110,14 +1110,14 @@ define <4 x double> @test_cvtdq2pd(<4 x
 
 define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
 ; GENERIC-LABEL: test_cvtdq2ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [10:1.00]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvtdq2ps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovaps (%rdi), %xmm1 # sched: [6:0.50]
 ; SANDY-NEXT:    vinsertf128 $1, 16(%rdi), %ymm1, %ymm1 # sched: [7:0.50]
@@ -1126,42 +1126,42 @@ define <8 x float> @test_cvtdq2ps(<8 x i
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtdq2ps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtdq2ps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtdq2ps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtdq2ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [11:0.50]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtdq2ps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtdq2ps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -1175,56 +1175,56 @@ define <8 x float> @test_cvtdq2ps(<8 x i
 
 define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_cvtpd2dq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
 ; GENERIC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvtpd2dq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [4:1.00]
 ; SANDY-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
 ; SANDY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtpd2dq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [6:1.00]
 ; HASWELL-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [7:1.00]
 ; HASWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtpd2dq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; BROADWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtpd2dq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; SKYLAKE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtpd2dq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; SKX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtpd2dq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [11:2.00]
 ; BTVER2-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [6:2.00]
 ; BTVER2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtpd2dq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtpd2dq %ymm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
@@ -1239,56 +1239,56 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2d
 
 define <8 x i32> @test_cvttpd2dq(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_cvttpd2dq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
 ; GENERIC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvttpd2dq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [4:1.00]
 ; SANDY-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [11:1.00]
 ; SANDY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvttpd2dq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [6:1.00]
 ; HASWELL-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [7:1.00]
 ; HASWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvttpd2dq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; BROADWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvttpd2dq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; SKYLAKE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvttpd2dq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; SKX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvttpd2dq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [11:2.00]
 ; BTVER2-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [6:2.00]
 ; BTVER2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvttpd2dq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
@@ -1302,56 +1302,56 @@ define <8 x i32> @test_cvttpd2dq(<4 x do
 
 define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_cvtpd2ps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
 ; GENERIC-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
 ; GENERIC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvtpd2ps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [4:1.00]
 ; SANDY-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
 ; SANDY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtpd2ps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [6:1.00]
 ; HASWELL-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [7:1.00]
 ; HASWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtpd2ps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
 ; BROADWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtpd2ps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
 ; SKYLAKE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtpd2ps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
 ; SKX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtpd2ps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [11:2.00]
 ; BTVER2-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [6:2.00]
 ; BTVER2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtpd2ps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
@@ -1365,56 +1365,56 @@ define <8 x float> @test_cvtpd2ps(<4 x d
 
 define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_cvtps2dq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [10:1.00]
 ; GENERIC-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvtps2dq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvtps2dq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvtps2dq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvtps2dq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvtps2dq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [11:0.50]
 ; SKX-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvtps2dq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtps2dq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvtps2dq (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtps2dq %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1429,56 +1429,56 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2d
 
 define <8 x i32> @test_cvttps2dq(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_cvttps2dq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [10:1.00]
 ; GENERIC-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_cvttps2dq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_cvttps2dq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_cvttps2dq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [9:1.00]
 ; BROADWELL-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_cvttps2dq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_cvttps2dq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [11:0.50]
 ; SKX-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_cvttps2dq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvttps2dq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1492,49 +1492,49 @@ define <8 x i32> @test_cvttps2dq(<8 x fl
 
 define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_divpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [45:2.00]
 ; GENERIC-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [52:2.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_divpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [45:2.00]
 ; SANDY-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [52:2.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_divpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [35:2.00]
 ; HASWELL-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [35:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_divpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [23:2.00]
 ; BROADWELL-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [29:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_divpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [14:1.00]
 ; SKYLAKE-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [21:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_divpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [14:1.00]
 ; SKX-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [21:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_divpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
 ; BTVER2-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_divpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:15.00]
 ; ZNVER1-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:22.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1546,49 +1546,49 @@ define <4 x double> @test_divpd(<4 x dou
 
 define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_divps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [29:2.00]
 ; GENERIC-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [36:2.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_divps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [29:2.00]
 ; SANDY-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [36:2.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_divps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [21:2.00]
 ; HASWELL-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [21:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_divps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [17:2.00]
 ; BROADWELL-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [23:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_divps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [11:1.00]
 ; SKYLAKE-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [18:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_divps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [11:1.00]
 ; SKX-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [18:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_divps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
 ; BTVER2-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_divps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [12:12.00]
 ; ZNVER1-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [19:19.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1600,49 +1600,49 @@ define <8 x float> @test_divps(<8 x floa
 
 define <8 x float> @test_dpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_dpps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:2.00]
 ; GENERIC-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_dpps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:2.00]
 ; SANDY-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_dpps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [14:2.00]
 ; HASWELL-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [14:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_dpps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [14:2.00]
 ; BROADWELL-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [20:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_dpps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [13:1.33]
 ; SKYLAKE-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [20:1.33]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_dpps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [13:1.33]
 ; SKX-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [20:1.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_dpps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:6.00]
 ; BTVER2-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [17:6.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_dpps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1655,55 +1655,55 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 
 define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_extractf128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_extractf128:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_extractf128:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_extractf128:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_extractf128:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_extractf128:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
 ; SKX-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_extractf128:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_extractf128:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33]
 ; ZNVER1-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50]
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
@@ -1716,49 +1716,49 @@ define <4 x float> @test_extractf128(<8
 
 define <4 x double> @test_haddpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_haddpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; GENERIC-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_haddpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; SANDY-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_haddpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_haddpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; BROADWELL-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_haddpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKYLAKE-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_haddpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKX-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_haddpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_haddpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1771,49 +1771,49 @@ declare <4 x double> @llvm.x86.avx.hadd.
 
 define <8 x float> @test_haddps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_haddps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; GENERIC-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_haddps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; SANDY-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_haddps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_haddps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; BROADWELL-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_haddps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKYLAKE-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_haddps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKX-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_haddps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_haddps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1826,49 +1826,49 @@ declare <8 x float> @llvm.x86.avx.hadd.p
 
 define <4 x double> @test_hsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_hsubpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; GENERIC-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_hsubpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; SANDY-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_hsubpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_hsubpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; BROADWELL-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_hsubpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKYLAKE-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_hsubpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKX-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_hsubpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_hsubpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1881,49 +1881,49 @@ declare <4 x double> @llvm.x86.avx.hsub.
 
 define <8 x float> @test_hsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_hsubps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; GENERIC-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_hsubps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; SANDY-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_hsubps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [5:2.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_hsubps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
 ; BROADWELL-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_hsubps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKYLAKE-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_hsubps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [6:2.00]
 ; SKX-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_hsubps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_hsubps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1936,56 +1936,56 @@ declare <8 x float> @llvm.x86.avx.hsub.p
 
 define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_insertf128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
 ; GENERIC-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_insertf128:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
 ; SANDY-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_insertf128:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; HASWELL-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_insertf128:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_insertf128:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; SKYLAKE-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_insertf128:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; SKX-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_insertf128:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
 ; BTVER2-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_insertf128:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.67]
 ; ZNVER1-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -2001,42 +2001,42 @@ define <8 x float> @test_insertf128(<8 x
 
 define <32 x i8> @test_lddqu(i8* %a0) {
 ; GENERIC-LABEL: test_lddqu:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vlddqu (%rdi), %ymm0 # sched: [6:0.50]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_lddqu:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vlddqu (%rdi), %ymm0 # sched: [6:0.50]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_lddqu:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vlddqu (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_lddqu:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vlddqu (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_lddqu:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vlddqu (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_lddqu:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vlddqu (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_lddqu:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vlddqu (%rdi), %ymm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_lddqu:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vlddqu (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0)
@@ -2046,56 +2046,56 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.2
 
 define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) {
 ; GENERIC-LABEL: test_maskmovpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
 ; GENERIC-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_maskmovpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
 ; SANDY-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maskmovpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
 ; HASWELL-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
 ; HASWELL-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maskmovpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
 ; BROADWELL-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
 ; BROADWELL-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maskmovpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maskmovpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
 ; SKX-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
 ; SKX-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maskmovpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
 ; BTVER2-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [6:2.00]
 ; BTVER2-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
 ; ZNVER1-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:0.50]
@@ -2109,56 +2109,56 @@ declare void @llvm.x86.avx.maskstore.pd(
 
 define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2) {
 ; GENERIC-LABEL: test_maskmovpd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
 ; GENERIC-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_maskmovpd_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
 ; SANDY-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maskmovpd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
 ; HASWELL-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
 ; HASWELL-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maskmovpd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
 ; BROADWELL-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; BROADWELL-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maskmovpd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maskmovpd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
 ; SKX-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
 ; SKX-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maskmovpd_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
 ; BTVER2-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [6:2.00]
 ; BTVER2-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovpd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; ZNVER1-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:0.50]
@@ -2172,56 +2172,56 @@ declare void @llvm.x86.avx.maskstore.pd.
 
 define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) {
 ; GENERIC-LABEL: test_maskmovps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
 ; GENERIC-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_maskmovps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
 ; SANDY-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maskmovps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
 ; HASWELL-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
 ; HASWELL-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maskmovps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
 ; BROADWELL-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
 ; BROADWELL-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maskmovps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maskmovps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
 ; SKX-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
 ; SKX-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maskmovps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
 ; BTVER2-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [6:2.00]
 ; BTVER2-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
 ; ZNVER1-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
@@ -2235,56 +2235,56 @@ declare void @llvm.x86.avx.maskstore.ps(
 
 define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2) {
 ; GENERIC-LABEL: test_maskmovps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
 ; GENERIC-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_maskmovps_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [9:1.00]
 ; SANDY-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maskmovps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
 ; HASWELL-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
 ; HASWELL-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maskmovps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
 ; BROADWELL-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; BROADWELL-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maskmovps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
 ; SKYLAKE-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maskmovps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
 ; SKX-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
 ; SKX-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maskmovps_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
 ; BTVER2-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [6:2.00]
 ; BTVER2-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; ZNVER1-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:0.50]
@@ -2298,49 +2298,49 @@ declare void @llvm.x86.avx.maskstore.ps.
 
 define <4 x double> @test_maxpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_maxpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_maxpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maxpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maxpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maxpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maxpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maxpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maxpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2353,49 +2353,49 @@ declare <4 x double> @llvm.x86.avx.max.p
 
 define <8 x float> @test_maxps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_maxps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_maxps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_maxps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_maxps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_maxps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_maxps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_maxps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maxps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2408,49 +2408,49 @@ declare <8 x float> @llvm.x86.avx.max.ps
 
 define <4 x double> @test_minpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_minpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_minpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_minpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_minpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_minpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_minpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_minpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_minpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2463,49 +2463,49 @@ declare <4 x double> @llvm.x86.avx.min.p
 
 define <8 x float> @test_minps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_minps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_minps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_minps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_minps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_minps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKYLAKE-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_minps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_minps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_minps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2518,56 +2518,56 @@ declare <8 x float> @llvm.x86.avx.min.ps
 
 define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_movapd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovapd (%rdi), %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovapd %ymm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movapd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovapd (%rdi), %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovapd %ymm0, (%rsi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movapd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovapd (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movapd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovapd (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movapd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovapd (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movapd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovapd (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movapd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovapd (%rdi), %ymm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movapd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovapd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:0.50]
@@ -2580,56 +2580,56 @@ define <4 x double> @test_movapd(<4 x do
 
 define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_movaps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovaps (%rdi), %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovaps %ymm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movaps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovaps (%rdi), %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovaps %ymm0, (%rsi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movaps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovaps (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movaps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovaps (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movaps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovaps (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movaps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovaps (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movaps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovaps (%rdi), %ymm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movaps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovaps (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:0.50]
@@ -2642,56 +2642,56 @@ define <8 x float> @test_movaps(<8 x flo
 
 define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_movddup:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
 ; GENERIC-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movddup:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
 ; SANDY-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movddup:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
 ; HASWELL-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [1:0.50]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movddup:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
 ; BROADWELL-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movddup:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movddup:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00]
 ; SKX-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [7:0.50]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movddup:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
 ; BTVER2-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movddup:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -2705,48 +2705,48 @@ define <4 x double> @test_movddup(<4 x d
 
 define i32 @test_movmskpd(<4 x double> %a0) {
 ; GENERIC-LABEL: test_movmskpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovmskpd %ymm0, %eax # sched: [2:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movmskpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovmskpd %ymm0, %eax # sched: [2:1.00]
 ; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movmskpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovmskpd %ymm0, %eax # sched: [3:1.00]
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movmskpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovmskpd %ymm0, %eax # sched: [3:1.00]
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movmskpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovmskpd %ymm0, %eax # sched: [2:1.00]
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movmskpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovmskpd %ymm0, %eax # sched: [2:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movmskpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovmskpd %ymm0, %eax # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movmskpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovmskpd %ymm0, %eax # sched: [1:1.00]
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2757,48 +2757,48 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(
 
 define i32 @test_movmskps(<8 x float> %a0) {
 ; GENERIC-LABEL: test_movmskps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovmskps %ymm0, %eax # sched: [2:1.00]
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movmskps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovmskps %ymm0, %eax # sched: [2:1.00]
 ; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movmskps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovmskps %ymm0, %eax # sched: [3:1.00]
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movmskps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovmskps %ymm0, %eax # sched: [3:1.00]
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movmskps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovmskps %ymm0, %eax # sched: [2:1.00]
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movmskps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovmskps %ymm0, %eax # sched: [2:1.00]
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movmskps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovmskps %ymm0, %eax # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movmskps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovmskps %ymm0, %eax # sched: [1:1.00]
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2809,7 +2809,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(
 
 define void @test_movntdq(<4 x i64> %a0, <4 x i64> *%a1) {
 ; GENERIC-LABEL: test_movntdq:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
 ; GENERIC-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    #NO_APP
@@ -2817,7 +2817,7 @@ define void @test_movntdq(<4 x i64> %a0,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movntdq:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    #APP
 ; SANDY-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    #NO_APP
@@ -2825,7 +2825,7 @@ define void @test_movntdq(<4 x i64> %a0,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movntdq:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
 ; HASWELL-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    #NO_APP
@@ -2833,7 +2833,7 @@ define void @test_movntdq(<4 x i64> %a0,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movntdq:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
 ; BROADWELL-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    #NO_APP
@@ -2841,7 +2841,7 @@ define void @test_movntdq(<4 x i64> %a0,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movntdq:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
 ; SKYLAKE-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    #NO_APP
@@ -2849,7 +2849,7 @@ define void @test_movntdq(<4 x i64> %a0,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movntdq:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
 ; SKX-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    #NO_APP
@@ -2857,14 +2857,14 @@ define void @test_movntdq(<4 x i64> %a0,
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movntdq:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    #APP
 ; BTVER2-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [3:2.00]
 ; BTVER2-NEXT:    #NO_APP
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movntdq:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
 ; ZNVER1-NEXT:    vmovntdq %ymm0, (%rdi) # sched: [1:0.50]
 ; ZNVER1-NEXT:    #NO_APP
@@ -2876,49 +2876,49 @@ define void @test_movntdq(<4 x i64> %a0,
 
 define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_movntpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movntpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movntpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movntpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movntpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movntpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movntpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movntpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2929,49 +2929,49 @@ define <4 x double> @test_movntpd(<4 x d
 
 define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_movntps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovntps %ymm0, (%rdi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movntps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vmovntps %ymm0, (%rdi) # sched: [5:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movntps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovntps %ymm0, (%rdi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movntps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovntps %ymm0, (%rdi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movntps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovntps %ymm0, (%rdi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movntps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovntps %ymm0, (%rdi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movntps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vmovntps %ymm0, (%rdi) # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movntps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovntps %ymm0, (%rdi) # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2982,56 +2982,56 @@ define <8 x float> @test_movntps(<8 x fl
 
 define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_movshdup:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
 ; GENERIC-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movshdup:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
 ; SANDY-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movshdup:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
 ; HASWELL-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movshdup:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
 ; BROADWELL-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movshdup:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movshdup:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00]
 ; SKX-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [7:0.50]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movshdup:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
 ; BTVER2-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movshdup:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3045,56 +3045,56 @@ define <8 x float> @test_movshdup(<8 x f
 
 define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_movsldup:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
 ; GENERIC-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movsldup:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
 ; SANDY-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movsldup:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
 ; HASWELL-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movsldup:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
 ; BROADWELL-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movsldup:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movsldup:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00]
 ; SKX-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [7:0.50]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movsldup:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
 ; BTVER2-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movsldup:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3108,14 +3108,14 @@ define <8 x float> @test_movsldup(<8 x f
 
 define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_movupd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovupd (%rdi), %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovupd %ymm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movupd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovups (%rdi), %xmm0 # sched: [6:0.50]
 ; SANDY-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3124,42 +3124,42 @@ define <4 x double> @test_movupd(<4 x do
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movupd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovupd (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movupd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovupd (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movupd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovupd (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movupd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovupd (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movupd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovupd (%rdi), %ymm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movupd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovupd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:0.50]
@@ -3172,14 +3172,14 @@ define <4 x double> @test_movupd(<4 x do
 
 define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_movups:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmovups (%rdi), %ymm0 # sched: [7:0.50]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vmovups %ymm0, (%rsi) # sched: [5:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_movups:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmovups (%rdi), %xmm0 # sched: [6:0.50]
 ; SANDY-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 # sched: [7:0.50]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3188,42 +3188,42 @@ define <8 x float> @test_movups(<8 x flo
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movups:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmovups (%rdi), %ymm0 # sched: [1:0.50]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_movups:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmovups (%rdi), %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_movups:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmovups (%rdi), %ymm0 # sched: [7:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_movups:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmovups (%rdi), %ymm0 # sched: [7:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_movups:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmovups (%rdi), %ymm0 # sched: [5:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movups:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmovups (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:0.50]
@@ -3236,49 +3236,49 @@ define <8 x float> @test_movups(<8 x flo
 
 define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_mulpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_mulpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_mulpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_mulpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [3:0.50]
 ; BROADWELL-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_mulpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_mulpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_mulpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00]
 ; BTVER2-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_mulpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3290,49 +3290,49 @@ define <4 x double> @test_mulpd(<4 x dou
 
 define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_mulps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
 ; GENERIC-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_mulps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
 ; SANDY-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_mulps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_mulps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [3:0.50]
 ; BROADWELL-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_mulps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_mulps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_mulps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_mulps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3344,56 +3344,56 @@ define <8 x float> @test_mulps(<8 x floa
 
 define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: orpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: orpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: orpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: orpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: orpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: orpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: orpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: orpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -3411,56 +3411,56 @@ define <4 x double> @orpd(<4 x double> %
 
 define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_orps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_orps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_orps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_orps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_orps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_orps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_orps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_orps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -3478,56 +3478,56 @@ define <8 x float> @test_orps(<8 x float
 
 define <4 x double> @test_perm2f128(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_perm2f128:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
 ; GENERIC-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_perm2f128:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
 ; SANDY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_perm2f128:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
 ; HASWELL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [3:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_perm2f128:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
 ; BROADWELL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_perm2f128:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
 ; SKYLAKE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_perm2f128:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
 ; SKX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_perm2f128:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:0.50]
 ; BTVER2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_perm2f128:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [100:?]
 ; ZNVER1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [100:?]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -3541,56 +3541,56 @@ define <4 x double> @test_perm2f128(<4 x
 
 define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
 ; GENERIC-LABEL: test_permilpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
 ; GENERIC-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
 ; SANDY-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
 ; SANDY-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00]
 ; BROADWELL-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
 ; SKYLAKE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:1.00]
 ; SKX-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [7:1.00]
 ; SKX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00]
 ; BTVER2-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3604,56 +3604,56 @@ define <2 x double> @test_permilpd(<2 x
 
 define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_permilpd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilpd_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
 ; SANDY-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilpd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilpd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilpd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilpd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
 ; SKX-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:1.00]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilpd_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
 ; BTVER2-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilpd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3667,56 +3667,56 @@ define <4 x double> @test_permilpd_ymm(<
 
 define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
 ; GENERIC-LABEL: test_permilps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
 ; GENERIC-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
 ; SANDY-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
 ; SANDY-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
 ; BROADWELL-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
 ; SKYLAKE-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:1.00]
 ; SKX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [7:1.00]
 ; SKX-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
 ; BTVER2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -3730,56 +3730,56 @@ define <4 x float> @test_permilps(<4 x f
 
 define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_permilps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilps_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; SANDY-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
 ; SKX-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:1.00]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilps_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
 ; BTVER2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -3793,49 +3793,49 @@ define <8 x float> @test_permilps_ymm(<8
 
 define <2 x double> @test_permilvarpd(<2 x double> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; GENERIC-LABEL: test_permilvarpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilvarpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilvarpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilvarpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilvarpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilvarpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilvarpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilvarpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3848,49 +3848,49 @@ declare <2 x double> @llvm.x86.avx.vperm
 
 define <4 x double> @test_permilvarpd_ymm(<4 x double> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
 ; GENERIC-LABEL: test_permilvarpd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilvarpd_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilvarpd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilvarpd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilvarpd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilvarpd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SKX-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilvarpd_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
 ; BTVER2-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilvarpd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3903,49 +3903,49 @@ declare <4 x double> @llvm.x86.avx.vperm
 
 define <4 x float> @test_permilvarps(<4 x float> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
 ; GENERIC-LABEL: test_permilvarps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilvarps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilvarps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilvarps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilvarps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilvarps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; SKX-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilvarps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilvarps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -3958,49 +3958,49 @@ declare <4 x float> @llvm.x86.avx.vpermi
 
 define <8 x float> @test_permilvarps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
 ; GENERIC-LABEL: test_permilvarps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_permilvarps_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_permilvarps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_permilvarps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_permilvarps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SKYLAKE-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_permilvarps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SKX-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_permilvarps_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
 ; BTVER2-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilvarps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -4013,56 +4013,56 @@ declare <8 x float> @llvm.x86.avx.vpermi
 
 define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_rcpps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vrcpps (%rdi), %ymm1 # sched: [14:2.00]
 ; GENERIC-NEXT:    vrcpps %ymm0, %ymm0 # sched: [7:2.00]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_rcpps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vrcpps (%rdi), %ymm1 # sched: [14:2.00]
 ; SANDY-NEXT:    vrcpps %ymm0, %ymm0 # sched: [7:2.00]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_rcpps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps (%rdi), %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vrcpps %ymm0, %ymm0 # sched: [11:2.00]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_rcpps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vrcpps (%rdi), %ymm1 # sched: [17:2.00]
 ; BROADWELL-NEXT:    vrcpps %ymm0, %ymm0 # sched: [11:2.00]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_rcpps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vrcpps %ymm0, %ymm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vrcpps (%rdi), %ymm1 # sched: [11:1.00]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_rcpps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %ymm0, %ymm0 # sched: [4:1.00]
 ; SKX-NEXT:    vrcpps (%rdi), %ymm1 # sched: [11:1.00]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_rcpps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vrcpps (%rdi), %ymm1 # sched: [7:2.00]
 ; BTVER2-NEXT:    vrcpps %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_rcpps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vrcpps (%rdi), %ymm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrcpps %ymm0, %ymm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4077,56 +4077,56 @@ declare <8 x float> @llvm.x86.avx.rcp.ps
 
 define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_roundpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_roundpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_roundpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [5:1.25]
 ; HASWELL-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [6:2.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_roundpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [12:2.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_roundpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [8:0.67]
 ; SKYLAKE-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [15:0.67]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_roundpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrndscalepd $7, %ymm0, %ymm0 # sched: [8:0.67]
 ; SKX-NEXT:    vrndscalepd $7, (%rdi), %ymm1 # sched: [15:0.67]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_roundpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_roundpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4141,56 +4141,56 @@ declare <4 x double> @llvm.x86.avx.round
 
 define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_roundps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_roundps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_roundps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [5:1.25]
 ; HASWELL-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [6:2.00]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_roundps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [6:0.50]
 ; BROADWELL-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [12:2.00]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_roundps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [8:0.67]
 ; SKYLAKE-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [15:0.67]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_roundps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrndscaleps $7, %ymm0, %ymm0 # sched: [8:0.67]
 ; SKX-NEXT:    vrndscaleps $7, (%rdi), %ymm1 # sched: [15:0.67]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_roundps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [8:2.00]
 ; BTVER2-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_roundps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4205,56 +4205,56 @@ declare <8 x float> @llvm.x86.avx.round.
 
 define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_rsqrtps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [14:2.00]
 ; GENERIC-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [7:2.00]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_rsqrtps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [14:2.00]
 ; SANDY-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [7:2.00]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_rsqrtps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [11:2.00]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_rsqrtps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [17:2.00]
 ; BROADWELL-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [11:2.00]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_rsqrtps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [4:1.00]
 ; SKYLAKE-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [11:1.00]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_rsqrtps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [4:1.00]
 ; SKX-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [11:1.00]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_rsqrtps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
 ; BTVER2-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_rsqrtps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4269,56 +4269,56 @@ declare <8 x float> @llvm.x86.avx.rsqrt.
 
 define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_shufpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
 ; GENERIC-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_shufpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
 ; SANDY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_shufpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
 ; HASWELL-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_shufpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
 ; BROADWELL-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_shufpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_shufpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
 ; SKX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:1.00]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_shufpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
 ; BTVER2-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_shufpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4332,49 +4332,49 @@ define <4 x double> @test_shufpd(<4 x do
 
 define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) nounwind {
 ; GENERIC-LABEL: test_shufps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
 ; GENERIC-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_shufps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
 ; SANDY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_shufps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
 ; HASWELL-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_shufps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
 ; BROADWELL-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [7:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_shufps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_shufps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
 ; SKX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_shufps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
 ; BTVER2-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_shufps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -4386,56 +4386,56 @@ define <8 x float> @test_shufps(<8 x flo
 
 define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) {
 ; GENERIC-LABEL: test_sqrtpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [52:2.00]
 ; GENERIC-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [45:2.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_sqrtpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [52:2.00]
 ; SANDY-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [45:2.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_sqrtpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [35:2.00]
 ; HASWELL-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [35:2.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_sqrtpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [40:2.00]
 ; BROADWELL-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [34:2.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_sqrtpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [18:1.00]
 ; SKYLAKE-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [25:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_sqrtpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [18:1.00]
 ; SKX-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [25:1.00]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_sqrtpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
 ; BTVER2-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_sqrtpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [47:47.00]
 ; ZNVER1-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [40:40.00]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4450,56 +4450,56 @@ declare <4 x double> @llvm.x86.avx.sqrt.
 
 define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) {
 ; GENERIC-LABEL: test_sqrtps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [36:2.00]
 ; GENERIC-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [29:2.00]
 ; GENERIC-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_sqrtps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [36:2.00]
 ; SANDY-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [29:2.00]
 ; SANDY-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_sqrtps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [21:2.00]
 ; HASWELL-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [21:2.00]
 ; HASWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_sqrtps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [27:2.00]
 ; BROADWELL-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [21:2.00]
 ; BROADWELL-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_sqrtps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [12:1.00]
 ; SKYLAKE-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [19:1.00]
 ; SKYLAKE-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_sqrtps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [12:1.00]
 ; SKX-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [19:1.00]
 ; SKX-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_sqrtps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
 ; BTVER2-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
 ; BTVER2-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_sqrtps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [35:35.00]
 ; ZNVER1-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [28:28.00]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -4514,49 +4514,49 @@ declare <8 x float> @llvm.x86.avx.sqrt.p
 
 define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_subpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_subpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_subpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_subpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_subpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_subpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_subpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_subpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -4568,49 +4568,49 @@ define <4 x double> @test_subpd(<4 x dou
 
 define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_subps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_subps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_subps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_subps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_subps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_subps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_subps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_subps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -4622,7 +4622,7 @@ define <8 x float> @test_subps(<8 x floa
 
 define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
 ; GENERIC-LABEL: test_testpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    vtestpd %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    setb %al # sched: [1:0.50]
@@ -4631,7 +4631,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_testpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    vtestpd %xmm1, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    setb %al # sched: [1:0.50]
@@ -4640,7 +4640,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_testpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    vtestpd %xmm1, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4649,7 +4649,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_testpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    vtestpd %xmm1, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4658,7 +4658,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_testpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    vtestpd %xmm1, %xmm0 # sched: [2:1.00]
 ; SKYLAKE-NEXT:    setb %al # sched: [1:0.50]
@@ -4667,7 +4667,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_testpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKX-NEXT:    vtestpd %xmm1, %xmm0 # sched: [2:1.00]
 ; SKX-NEXT:    setb %al # sched: [1:0.50]
@@ -4676,7 +4676,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_testpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    xorl %eax, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    vtestpd %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    setb %al # sched: [1:0.50]
@@ -4685,7 +4685,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_testpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestpd %xmm1, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
@@ -4702,7 +4702,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x
 
 define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_testpd_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    vtestpd %ymm1, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    setb %al # sched: [1:0.50]
@@ -4712,7 +4712,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_testpd_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    vtestpd %ymm1, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    setb %al # sched: [1:0.50]
@@ -4722,7 +4722,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_testpd_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    vtestpd %ymm1, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4732,7 +4732,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_testpd_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    vtestpd %ymm1, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4742,7 +4742,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_testpd_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    vtestpd %ymm1, %ymm0 # sched: [2:1.00]
 ; SKYLAKE-NEXT:    setb %al # sched: [1:0.50]
@@ -4752,7 +4752,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_testpd_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKX-NEXT:    vtestpd %ymm1, %ymm0 # sched: [2:1.00]
 ; SKX-NEXT:    setb %al # sched: [1:0.50]
@@ -4762,7 +4762,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_testpd_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    xorl %eax, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    vtestpd %ymm1, %ymm0 # sched: [4:2.00]
 ; BTVER2-NEXT:    setb %al # sched: [1:0.50]
@@ -4771,7 +4771,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_testpd_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestpd %ymm1, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
@@ -4789,7 +4789,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(
 
 define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_testps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    vtestps %xmm1, %xmm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    setb %al # sched: [1:0.50]
@@ -4798,7 +4798,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_testps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    vtestps %xmm1, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    setb %al # sched: [1:0.50]
@@ -4807,7 +4807,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_testps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    vtestps %xmm1, %xmm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4816,7 +4816,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_testps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    vtestps %xmm1, %xmm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4825,7 +4825,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_testps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    vtestps %xmm1, %xmm0 # sched: [2:1.00]
 ; SKYLAKE-NEXT:    setb %al # sched: [1:0.50]
@@ -4834,7 +4834,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_testps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKX-NEXT:    vtestps %xmm1, %xmm0 # sched: [2:1.00]
 ; SKX-NEXT:    setb %al # sched: [1:0.50]
@@ -4843,7 +4843,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_testps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    xorl %eax, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    vtestps %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT:    setb %al # sched: [1:0.50]
@@ -4852,7 +4852,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_testps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestps %xmm1, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
@@ -4869,7 +4869,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x
 
 define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_testps_ymm:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; GENERIC-NEXT:    vtestps %ymm1, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    setb %al # sched: [1:0.50]
@@ -4879,7 +4879,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_testps_ymm:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    xorl %eax, %eax # sched: [1:0.33]
 ; SANDY-NEXT:    vtestps %ymm1, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    setb %al # sched: [1:0.50]
@@ -4889,7 +4889,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_testps_ymm:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; HASWELL-NEXT:    vtestps %ymm1, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4899,7 +4899,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_testps_ymm:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; BROADWELL-NEXT:    vtestps %ymm1, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    setb %al # sched: [1:0.50]
@@ -4909,7 +4909,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_testps_ymm:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKYLAKE-NEXT:    vtestps %ymm1, %ymm0 # sched: [2:1.00]
 ; SKYLAKE-NEXT:    setb %al # sched: [1:0.50]
@@ -4919,7 +4919,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_testps_ymm:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; SKX-NEXT:    vtestps %ymm1, %ymm0 # sched: [2:1.00]
 ; SKX-NEXT:    setb %al # sched: [1:0.50]
@@ -4929,7 +4929,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_testps_ymm:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    xorl %eax, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    vtestps %ymm1, %ymm0 # sched: [4:2.00]
 ; BTVER2-NEXT:    setb %al # sched: [1:0.50]
@@ -4938,7 +4938,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_testps_ymm:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    xorl %eax, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestps %ymm1, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
@@ -4956,56 +4956,56 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(
 
 define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_unpckhpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
 ; GENERIC-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_unpckhpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
 ; SANDY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_unpckhpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
 ; HASWELL-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_unpckhpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
 ; BROADWELL-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_unpckhpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_unpckhpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
 ; SKX-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:1.00]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_unpckhpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
 ; BTVER2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_unpckhpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -5019,49 +5019,49 @@ define <4 x double> @test_unpckhpd(<4 x
 
 define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) nounwind {
 ; GENERIC-LABEL: test_unpckhps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
 ; GENERIC-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_unpckhps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
 ; SANDY-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_unpckhps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
 ; HASWELL-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_unpckhps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
 ; BROADWELL-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_unpckhps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_unpckhps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
 ; SKX-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_unpckhps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
 ; BTVER2-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_unpckhps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -5073,56 +5073,56 @@ define <8 x float> @test_unpckhps(<8 x f
 
 define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_unpcklpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
 ; GENERIC-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_unpcklpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
 ; SANDY-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_unpcklpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
 ; HASWELL-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_unpcklpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
 ; BROADWELL-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_unpcklpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
 ; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_unpcklpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
 ; SKX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:1.00]
 ; SKX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_unpcklpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
 ; BTVER2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_unpcklpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -5136,49 +5136,49 @@ define <4 x double> @test_unpcklpd(<4 x
 
 define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) nounwind {
 ; GENERIC-LABEL: test_unpcklps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
 ; GENERIC-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_unpcklps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
 ; SANDY-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_unpcklps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
 ; HASWELL-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_unpcklps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
 ; BROADWELL-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_unpcklps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
 ; SKYLAKE-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_unpcklps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
 ; SKX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_unpcklps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
 ; BTVER2-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [6:1.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_unpcklps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -5190,56 +5190,56 @@ define <8 x float> @test_unpcklps(<8 x f
 
 define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) {
 ; GENERIC-LABEL: test_xorpd:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_xorpd:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_xorpd:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_xorpd:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_xorpd:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_xorpd:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_xorpd:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_xorpd:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -5257,56 +5257,56 @@ define <4 x double> @test_xorpd(<4 x dou
 
 define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) {
 ; GENERIC-LABEL: test_xorps:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; GENERIC-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_xorps:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_xorps:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
 ; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_xorps:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BROADWELL-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
 ; BROADWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_xorps:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKYLAKE-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_xorps:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
 ; SKX-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; SKX-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_xorps:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_xorps:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -5324,42 +5324,42 @@ define <8 x float> @test_xorps(<8 x floa
 
 define void @test_zeroall() {
 ; GENERIC-LABEL: test_zeroall:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vzeroall
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_zeroall:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vzeroall
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_zeroall:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vzeroall # sched: [16:16.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_zeroall:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vzeroall # sched: [16:16.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_zeroall:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vzeroall # sched: [16:4.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_zeroall:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vzeroall # sched: [16:4.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_zeroall:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vzeroall # sched: [90:?]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_zeroall:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vzeroall # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.avx.vzeroall()
@@ -5369,42 +5369,42 @@ declare void @llvm.x86.avx.vzeroall() no
 
 define void @test_zeroupper() {
 ; GENERIC-LABEL: test_zeroupper:
-; GENERIC:       # BB#0:
+; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    vzeroupper
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; SANDY-LABEL: test_zeroupper:
-; SANDY:       # BB#0:
+; SANDY:       # %bb.0:
 ; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_zeroupper:
-; HASWELL:       # BB#0:
+; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [2:1.00]
 ;
 ; BROADWELL-LABEL: test_zeroupper:
-; BROADWELL:       # BB#0:
+; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_zeroupper:
-; SKYLAKE:       # BB#0:
+; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_zeroupper:
-; SKX:       # BB#0:
+; SKX:       # %bb.0:
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; BTVER2-LABEL: test_zeroupper:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    vzeroupper # sched: [46:?]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_zeroupper:
-; ZNVER1:       # BB#0:
+; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.avx.vzeroupper()

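For reference, the only change in all of the hunks above is the spelling of the
AsmPrinter's basic-block comment: "# BB#N:" becomes "# %bb.N:", matching how MIR
names machine basic blocks. A minimal sketch of a test in the new style follows;
the function name, instruction sequence, and the assumed x86-64 llc | FileCheck
run are illustrative only and are not part of this patch:

define i32 @sample_inc(i32 %x) {
; CHECK-LABEL: sample_inc:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    leal 1(%rdi), %eax
; CHECK-NEXT:    retq
entry:
  %r = add i32 %x, 1
  ret i32 %r
}

Only the "# %bb.0:" spelling is new; the surrounding FileCheck conventions
(CHECK-LABEL, CHECK-NEXT) are unchanged, which is why each hunk touches exactly
one line per check prefix.
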
Modified: llvm/trunk/test/CodeGen/X86/avx-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-select.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-select.ll Mon Dec  4 09:18:51 2017
@@ -4,22 +4,22 @@
 
 define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind {
 ; X86-LABEL: select00:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    cmpl $255, {{[0-9]+}}(%esp)
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    je .LBB0_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    vmovaps %ymm0, %ymm1
 ; X86-NEXT:  .LBB0_2:
 ; X86-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select00:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $255, %edi
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    je .LBB0_2
-; X64-NEXT:  # BB#1:
+; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-NEXT:  .LBB0_2:
 ; X64-NEXT:    vxorps %ymm1, %ymm0, %ymm0
@@ -32,22 +32,22 @@ define <8 x i32> @select00(i32 %a, <8 x
 
 define <4 x i64> @select01(i32 %a, <4 x i64> %b) nounwind {
 ; X86-LABEL: select01:
-; X86:       # BB#0:
+; X86:       # %bb.0:
 ; X86-NEXT:    cmpl $255, {{[0-9]+}}(%esp)
 ; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X86-NEXT:    je .LBB1_2
-; X86-NEXT:  # BB#1:
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    vmovaps %ymm0, %ymm1
 ; X86-NEXT:  .LBB1_2:
 ; X86-NEXT:    vxorps %ymm1, %ymm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select01:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    cmpl $255, %edi
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    je .LBB1_2
-; X64-NEXT:  # BB#1:
+; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    vmovaps %ymm0, %ymm1
 ; X64-NEXT:  .LBB1_2:
 ; X64-NEXT:    vxorps %ymm1, %ymm0, %ymm0

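The avx-select.ll hunks above also show the other half of the convention: blocks
that are real branch targets keep their assembly labels (".LBB0_2:"), while
fall-through blocks are announced only by the comment, now spelled "# %bb.1:"
rather than "# BB#1:". A hedged sketch of that pattern, mirroring the select00
output above (the IR body is reconstructed from the checked assembly and the
CHECK lines are loosely pinned on purpose):

define <8 x i32> @sample_select(i32 %a, <8 x i32> %b) nounwind {
; CHECK-LABEL: sample_select:
; CHECK:       # %bb.0:
; CHECK:         je .LBB0_2
; CHECK-NEXT:  # %bb.1:
; CHECK:       .LBB0_2:
  %cmp = icmp eq i32 %a, 255
  %sel = select i1 %cmp, <8 x i32> zeroinitializer, <8 x i32> %b
  %res = xor <8 x i32> %sel, %b
  ret <8 x i32> %res
}

Note that ".LBB0_2" itself is untouched by this patch: only the pure-comment
block markers change spelling, since labels are real symbols in the output.
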
Modified: llvm/trunk/test/CodeGen/X86/avx-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-shift.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ;;; Shift left
 define <8 x i32> @vshift00(<8 x i32> %a) {
 ; CHECK-LABEL: vshift00:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpslld $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpslld $2, %xmm0, %xmm0
@@ -16,7 +16,7 @@ define <8 x i32> @vshift00(<8 x i32> %a)
 
 define <16 x i16> @vshift01(<16 x i16> %a) {
 ; CHECK-LABEL: vshift01:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllw $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsllw $2, %xmm0, %xmm0
@@ -28,7 +28,7 @@ define <16 x i16> @vshift01(<16 x i16> %
 
 define <4 x i64> @vshift02(<4 x i64> %a) {
 ; CHECK-LABEL: vshift02:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllq $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsllq $2, %xmm0, %xmm0
@@ -41,7 +41,7 @@ define <4 x i64> @vshift02(<4 x i64> %a)
 ;;; Logical Shift right
 define <8 x i32> @vshift03(<8 x i32> %a) {
 ; CHECK-LABEL: vshift03:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrld $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsrld $2, %xmm0, %xmm0
@@ -53,7 +53,7 @@ define <8 x i32> @vshift03(<8 x i32> %a)
 
 define <16 x i16> @vshift04(<16 x i16> %a) {
 ; CHECK-LABEL: vshift04:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlw $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsrlw $2, %xmm0, %xmm0
@@ -65,7 +65,7 @@ define <16 x i16> @vshift04(<16 x i16> %
 
 define <4 x i64> @vshift05(<4 x i64> %a) {
 ; CHECK-LABEL: vshift05:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlq $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsrlq $2, %xmm0, %xmm0
@@ -78,7 +78,7 @@ define <4 x i64> @vshift05(<4 x i64> %a)
 ;;; Arithmetic Shift right
 define <8 x i32> @vshift06(<8 x i32> %a) {
 ; CHECK-LABEL: vshift06:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrad $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsrad $2, %xmm0, %xmm0
@@ -90,7 +90,7 @@ define <8 x i32> @vshift06(<8 x i32> %a)
 
 define <16 x i16> @vshift07(<16 x i16> %a) {
 ; CHECK-LABEL: vshift07:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsraw $2, %xmm0, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpsraw $2, %xmm0, %xmm0
@@ -102,7 +102,7 @@ define <16 x i16> @vshift07(<16 x i16> %
 
 define <32 x i8> @vshift09(<32 x i8> %a) {
 ; CHECK-LABEL: vshift09:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpsrlw $2, %xmm1, %xmm1
 ; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -122,7 +122,7 @@ define <32 x i8> @vshift09(<32 x i8> %a)
 
 define <32 x i8> @vshift10(<32 x i8> %a) {
 ; CHECK-LABEL: vshift10:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpcmpgtb %xmm1, %xmm2, %xmm1
@@ -135,7 +135,7 @@ define <32 x i8> @vshift10(<32 x i8> %a)
 
 define <32 x i8> @vshift11(<32 x i8> %a) {
 ; CHECK-LABEL: vshift11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpsrlw $2, %xmm1, %xmm1
 ; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -150,7 +150,7 @@ define <32 x i8> @vshift11(<32 x i8> %a)
 
 define <32 x i8> @vshift12(<32 x i8> %a) {
 ; CHECK-LABEL: vshift12:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vpsllw $2, %xmm1, %xmm1
 ; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
@@ -166,7 +166,7 @@ define <32 x i8> @vshift12(<32 x i8> %a)
 ;;; Support variable shifts
 define <8 x i32> @vshift08(<8 x i32> %a)  {
 ; CHECK-LABEL: vshift08:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpslld $23, %xmm0, %xmm1
 ; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [1065353216,1065353216,1065353216,1065353216]
 ; CHECK-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
@@ -184,7 +184,7 @@ define <8 x i32> @vshift08(<8 x i32> %a)
 ; PR15141
 define <4 x i32> @vshift13(<4 x i32> %in) {
 ; CHECK-LABEL: vshift13:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %T = shl <4 x i32> %in, <i32 0, i32 1, i32 2, i32 4>
@@ -194,7 +194,7 @@ define <4 x i32> @vshift13(<4 x i32> %in
 ;;; Uses shifts for sign extension
 define <16 x i16> @sext_v16i16(<16 x i16> %a)  {
 ; CHECK-LABEL: sext_v16i16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllw $8, %xmm0, %xmm1
 ; CHECK-NEXT:    vpsraw $8, %xmm1, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -209,7 +209,7 @@ define <16 x i16> @sext_v16i16(<16 x i16
 
 define <8 x i32> @sext_v8i32(<8 x i32> %a)  {
 ; CHECK-LABEL: sext_v8i32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpslld $16, %xmm0, %xmm1
 ; CHECK-NEXT:    vpsrad $16, %xmm1, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/avx-shuffle-x86_32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-shuffle-x86_32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-shuffle-x86_32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-shuffle-x86_32.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 ; Avoid unnecessary vinsertf128
 define <4 x i64> @test1(<4 x i64> %a) nounwind {
 ; CHECK-LABEL: test1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; CHECK-NEXT:    retl
@@ -14,7 +14,7 @@ define <4 x i64> @test1(<4 x i64> %a) no
 
 define <8 x i16> @test2(<4 x i16>* %v) nounwind {
 ; CHECK-LABEL: test2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/avx-splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-splat.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-splat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-splat.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <32 x i8> @funcA(<32 x i8> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: funcA:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -14,7 +14,7 @@ entry:
 
 define <16 x i16> @funcB(<16 x i16> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: funcB:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -26,7 +26,7 @@ entry:
 
 define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: funcC:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmovq %rdi, %xmm0
 ; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -41,7 +41,7 @@ entry:
 
 define <4 x double> @funcD(double %q) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: funcD:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -58,12 +58,12 @@ entry:
 ;
 define <8 x float> @funcE() nounwind {
 ; CHECK-LABEL: funcE:
-; CHECK:       # BB#0: # %for_exit499
+; CHECK:       # %bb.0: # %for_exit499
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    # implicit-def: %ymm0
 ; CHECK-NEXT:    jne .LBB4_2
-; CHECK-NEXT:  # BB#1: # %load.i1247
+; CHECK-NEXT:  # %bb.1: # %load.i1247
 ; CHECK-NEXT:    pushq %rbp
 ; CHECK-NEXT:    movq %rsp, %rbp
 ; CHECK-NEXT:    andq $-32, %rsp
@@ -99,7 +99,7 @@ __load_and_broadcast_32.exit1249:
 
 define <8 x float> @funcF(i32 %val) nounwind {
 ; CHECK-LABEL: funcF:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovd %edi, %xmm0
 ; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,0]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -112,7 +112,7 @@ define <8 x float> @funcF(i32 %val) noun
 
 define <8 x float> @funcG(<8 x float> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: funcG:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
@@ -123,7 +123,7 @@ entry:
 
 define <8 x float> @funcH(<8 x float> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: funcH:
-; CHECK:       # BB#0: # %entry
+; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,1,1,1,5,5,5,5]
 ; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; CHECK-NEXT:    retq
@@ -134,7 +134,7 @@ entry:
 
 define <2 x double> @splat_load_2f64_11(<2 x double>* %ptr) {
 ; CHECK-LABEL: splat_load_2f64_11:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; CHECK-NEXT:    retq
   %x = load <2 x double>, <2 x double>* %ptr
@@ -144,7 +144,7 @@ define <2 x double> @splat_load_2f64_11(
 
 define <4 x double> @splat_load_4f64_2222(<4 x double>* %ptr) {
 ; CHECK-LABEL: splat_load_4f64_2222:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastsd 16(%rdi), %ymm0
 ; CHECK-NEXT:    retq
   %x = load <4 x double>, <4 x double>* %ptr
@@ -154,7 +154,7 @@ define <4 x double> @splat_load_4f64_222
 
 define <4 x float> @splat_load_4f32_0000(<4 x float>* %ptr) {
 ; CHECK-LABEL: splat_load_4f32_0000:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastss (%rdi), %xmm0
 ; CHECK-NEXT:    retq
   %x = load <4 x float>, <4 x float>* %ptr
@@ -164,7 +164,7 @@ define <4 x float> @splat_load_4f32_0000
 
 define <8 x float> @splat_load_8f32_77777777(<8 x float>* %ptr) {
 ; CHECK-LABEL: splat_load_8f32_77777777:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastss 28(%rdi), %ymm0
 ; CHECK-NEXT:    retq
   %x = load <8 x float>, <8 x float>* %ptr

Modified: llvm/trunk/test/CodeGen/X86/avx-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-trunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-trunc.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <4 x i32> @trunc_64_32(<4 x i64> %A) nounwind uwtable readnone ssp{
 ; CHECK-LABEL: trunc_64_32:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; CHECK-NEXT:    vzeroupper
@@ -14,7 +14,7 @@ define <4 x i32> @trunc_64_32(<4 x i64>
 
 define <8 x i16> @trunc_32_16(<8 x i32> %A) nounwind uwtable readnone ssp{
 ; CHECK-LABEL: trunc_32_16:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; CHECK-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
@@ -28,7 +28,7 @@ define <8 x i16> @trunc_32_16(<8 x i32>
 
 define <16 x i8> @trunc_16_8(<16 x i16> %A) nounwind uwtable readnone ssp{
 ; CHECK-LABEL: trunc_16_8:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; CHECK-NEXT:    vpshufb %xmm2, %xmm1, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/avx-unpack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-unpack.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-unpack.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-unpack.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhips:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -12,7 +12,7 @@ define <8 x float> @unpackhips(<8 x floa
 
 define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhipd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -21,7 +21,7 @@ define <4 x double> @unpackhipd(<4 x dou
 
 define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklops:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -30,7 +30,7 @@ define <8 x float> @unpacklops(<8 x floa
 
 define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklopd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -39,7 +39,7 @@ define <4 x double> @unpacklopd(<4 x dou
 
 define <8 x float> @unpacklops_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklops_not:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; CHECK-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -50,7 +50,7 @@ define <8 x float> @unpacklops_not(<8 x
 
 define <4 x double> @unpacklopd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklopd_not:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
 ; CHECK-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -61,7 +61,7 @@ define <4 x double> @unpacklopd_not(<4 x
 
 define <8 x float> @unpackhips_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhips_not:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[u,2,u,3,u,4,u,5]
 ; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,u,3,u,4,u,5,u]
 ; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
@@ -72,7 +72,7 @@ define <8 x float> @unpackhips_not(<8 x
 
 define <4 x double> @unpackhipd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhipd_not:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
@@ -89,7 +89,7 @@ define <4 x double> @unpackhipd_not(<4 x
 
 define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhips1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -98,7 +98,7 @@ define <8 x i32> @unpackhips1(<8 x i32>
 
 define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhips2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0
 ; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
 ; CHECK-NEXT:    retq
@@ -110,7 +110,7 @@ define <8 x i32> @unpackhips2(<8 x i32>*
 
 define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhipd1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -119,7 +119,7 @@ define <4 x i64> @unpackhipd1(<4 x i64>
 
 define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhipd2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0
 ; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
 ; CHECK-NEXT:    retq
@@ -131,7 +131,7 @@ define <4 x i64> @unpackhipd2(<4 x i64>*
 
 define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklops1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -140,7 +140,7 @@ define <8 x i32> @unpacklops1(<8 x i32>
 
 define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklops2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0
 ; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
 ; CHECK-NEXT:    retq
@@ -152,7 +152,7 @@ define <8 x i32> @unpacklops2(<8 x i32>*
 
 define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklopd1:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -161,7 +161,7 @@ define <4 x i64> @unpacklopd1(<4 x i64>
 
 define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklopd2:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
 ; CHECK-NEXT:    retq
@@ -173,7 +173,7 @@ define <4 x i64> @unpacklopd2(<4 x i64>*
 
 define <16 x i16> @unpackhwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhwd_undef:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
@@ -185,7 +185,7 @@ define <16 x i16> @unpackhwd_undef(<16 x
 
 define <16 x i16> @unpacklwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklwd_undef:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3]
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -197,7 +197,7 @@ define <16 x i16> @unpacklwd_undef(<16 x
 
 define <32 x i8> @unpackhbw_undef(<32 x i8> %src1, <32 x i8> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhbw_undef:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
@@ -209,7 +209,7 @@ define <32 x i8> @unpackhbw_undef(<32 x
 
 define <32 x i8> @unpacklbw_undef(<32 x i8> %src1) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpacklbw_undef:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]

Modified: llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll Mon Dec  4 09:18:51 2017
@@ -4,7 +4,7 @@
 
 define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: A:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl (%eax), %ecx
 ; X32-NEXT:    movl 4(%eax), %eax
@@ -16,7 +16,7 @@ define <4 x i64> @A(i64* %ptr) nounwind
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: A:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -30,7 +30,7 @@ entry:
 
 define <4 x i64> @A2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: A2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl (%ecx), %edx
@@ -45,7 +45,7 @@ define <4 x i64> @A2(i64* %ptr, i64* %pt
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: A2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    vmovq %rax, %xmm0
 ; X64-NEXT:    movq %rax, (%rsi)
@@ -64,13 +64,13 @@ entry:
 
 define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: B:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: B:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -84,13 +84,13 @@ entry:
 
 define <8 x i32> @B2(i32* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: B2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: B2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -108,7 +108,7 @@ entry:
 
 define <8 x i32> @B3(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: B3:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl (%ecx), %ecx
@@ -119,7 +119,7 @@ define <8 x i32> @B3(i32* %ptr, i32* %pt
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: B3:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    vmovd %eax, %xmm0
 ; X64-NEXT:    movl %eax, (%rsi)
@@ -142,13 +142,13 @@ entry:
 
 define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: C:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: C:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -162,7 +162,7 @@ entry:
 
 define <4 x double> @C2(double* %ptr, double* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: C2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -172,7 +172,7 @@ define <4 x double> @C2(double* %ptr, do
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: C2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    vmovsd %xmm0, (%rsi)
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -190,13 +190,13 @@ entry:
 
 define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: D:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: D:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -210,13 +210,13 @@ entry:
 
 define <8 x float> @D2(float* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: D2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: D2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -234,7 +234,7 @@ entry:
 
 define <8 x float> @D3(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: D3:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -244,7 +244,7 @@ define <8 x float> @D3(float* %ptr, floa
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: D3:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
@@ -268,13 +268,13 @@ entry:
 
 define <4 x float> @e(float* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: e:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: e:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -288,7 +288,7 @@ entry:
 
 define <4 x float> @e2(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: e2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -297,7 +297,7 @@ define <4 x float> @e2(float* %ptr, floa
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: e2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    vmovss %xmm0, (%rsi)
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
@@ -315,12 +315,12 @@ entry:
 ; Don't broadcast constants on pre-AVX2 hardware.
 define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: _e2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: _e2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
 ; X64-NEXT:    retq
 entry:
@@ -334,13 +334,13 @@ entry:
 
 define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: F:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: F:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -354,7 +354,7 @@ entry:
 
 define <4 x i32> @F2(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: F2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl (%ecx), %ecx
@@ -364,7 +364,7 @@ define <4 x i32> @F2(i32* %ptr, i32* %pt
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: F2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    movl %eax, (%rsi)
 ; X64-NEXT:    vmovd %eax, %xmm0
@@ -384,13 +384,13 @@ entry:
 
 define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_4i32_4i32_1111:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = mem[1,1,1,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4i32_4i32_1111:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = mem[1,1,1,1]
 ; X64-NEXT:    retq
 entry:
@@ -401,13 +401,13 @@ entry:
 
 define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_8i32_4i32_33333333:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss 12(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_8i32_4i32_33333333:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss 12(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -418,13 +418,13 @@ entry:
 
 define <8 x i32> @load_splat_8i32_8i32_55555555(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_8i32_8i32_55555555:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss 20(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_8i32_8i32_55555555:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss 20(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -435,13 +435,13 @@ entry:
 
 define <4 x float> @load_splat_4f32_4f32_1111(<4 x float>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_4f32_4f32_1111:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss 4(%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4f32_4f32_1111:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss 4(%rdi), %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -452,13 +452,13 @@ entry:
 
 define <8 x float> @load_splat_8f32_4f32_33333333(<4 x float>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_8f32_4f32_33333333:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss 12(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_8f32_4f32_33333333:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss 12(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -469,13 +469,13 @@ entry:
 
 define <8 x float> @load_splat_8f32_8f32_55555555(<8 x float>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_8f32_8f32_55555555:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss 20(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_8f32_8f32_55555555:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss 20(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -486,13 +486,13 @@ entry:
 
 define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_2i64_2i64_1111:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_2i64_2i64_1111:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
 ; X64-NEXT:    retq
 entry:
@@ -503,13 +503,13 @@ entry:
 
 define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_4i64_2i64_1111:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd 8(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4i64_2i64_1111:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastsd 8(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -520,13 +520,13 @@ entry:
 
 define <4 x i64> @load_splat_4i64_4i64_2222(<4 x i64>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_4i64_4i64_2222:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd 16(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4i64_4i64_2222:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastsd 16(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -537,13 +537,13 @@ entry:
 
 define <2 x double> @load_splat_2f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_2f64_2f64_1111:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_2f64_2f64_1111:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X64-NEXT:    retq
 entry:
@@ -554,13 +554,13 @@ entry:
 
 define <4 x double> @load_splat_4f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_4f64_2f64_1111:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd 8(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4f64_2f64_1111:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastsd 8(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -571,13 +571,13 @@ entry:
 
 define <4 x double> @load_splat_4f64_4f64_2222(<4 x double>* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: load_splat_4f64_4f64_2222:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd 16(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4f64_4f64_2222:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastsd 16(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
@@ -590,7 +590,7 @@ entry:
 
 define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: G:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl (%eax), %ecx
 ; X32-NEXT:    movl 4(%eax), %eax
@@ -601,7 +601,7 @@ define <2 x i64> @G(i64* %ptr) nounwind
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: G:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
 ; X64-NEXT:    retq
@@ -614,7 +614,7 @@ entry:
 
 define <2 x i64> @G2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: G2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl (%ecx), %edx
@@ -628,7 +628,7 @@ define <2 x i64> @G2(i64* %ptr, i64* %pt
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: G2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    movq %rax, (%rsi)
 ; X64-NEXT:    vmovq %rax, %xmm0
@@ -644,12 +644,12 @@ entry:
 
 define <4 x i32> @H(<4 x i32> %a) {
 ; X32-LABEL: H:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: H:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-NEXT:    retq
 entry:
@@ -659,13 +659,13 @@ entry:
 
 define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
 ; X32-LABEL: I:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: I:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X64-NEXT:    retq
 entry:
@@ -677,7 +677,7 @@ entry:
 
 define <2 x double> @I2(double* %ptr, double* %ptr2) nounwind uwtable readnone ssp {
 ; X32-LABEL: I2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -686,7 +686,7 @@ define <2 x double> @I2(double* %ptr, do
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: I2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    vmovsd %xmm0, (%rsi)
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -701,7 +701,7 @@ entry:
 
 define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
 ; X32-LABEL: _RR:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vbroadcastss (%ecx), %xmm0
@@ -710,7 +710,7 @@ define <4 x float> @_RR(float* %ptr, i32
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: _RR:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    movl (%rsi), %eax
 ; X64-NEXT:    movl %eax, (%rax)
@@ -729,13 +729,13 @@ entry:
 
 define <4 x float> @_RR2(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
 ; X32-LABEL: _RR2:
-; X32:       ## BB#0: ## %entry
+; X32:       ## %bb.0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: _RR2:
-; X64:       ## BB#0: ## %entry
+; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    vbroadcastss (%rdi), %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -751,13 +751,13 @@ entry:
 
 define <8 x float> @splat_concat1(float* %p) {
 ; X32-LABEL: splat_concat1:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: splat_concat1:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
   %1 = load float, float* %p, align 4
@@ -771,13 +771,13 @@ define <8 x float> @splat_concat1(float*
 
 define <8 x float> @splat_concat2(float* %p) {
 ; X32-LABEL: splat_concat2:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastss (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: splat_concat2:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vbroadcastss (%rdi), %ymm0
 ; X64-NEXT:    retq
   %1 = load float, float* %p, align 4
@@ -795,13 +795,13 @@ define <8 x float> @splat_concat2(float*
 
 define <4 x double> @splat_concat3(double* %p) {
 ; X32-LABEL: splat_concat3:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: splat_concat3:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
   %1 = load double, double* %p, align 8
@@ -813,13 +813,13 @@ define <4 x double> @splat_concat3(doubl
 
 define <4 x double> @splat_concat4(double* %p) {
 ; X32-LABEL: splat_concat4:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: splat_concat4:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
   %1 = load double, double* %p, align 8
@@ -834,13 +834,13 @@ define <4 x double> @splat_concat4(doubl
 ; PR34041
 define <4 x double> @broadcast_shuffle_1000(double* %p) {
 ; X32-LABEL: broadcast_shuffle_1000:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: broadcast_shuffle_1000:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
   %1 = load double, double* %p
@@ -851,13 +851,13 @@ define <4 x double> @broadcast_shuffle_1
 
 define <4 x double> @broadcast_shuffle1032(double* %p) {
 ; X32-LABEL: broadcast_shuffle1032:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: broadcast_shuffle1032:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; X64-NEXT:    retq
   %1 = load double, double* %p
@@ -872,7 +872,7 @@ define <4 x double> @broadcast_shuffle10
 ;
 define float @broadcast_lifetime() nounwind {
 ; X32-LABEL: broadcast_lifetime:
-; X32:       ## BB#0:
+; X32:       ## %bb.0:
 ; X32-NEXT:    pushl %esi
 ; X32-NEXT:    subl $56, %esp
 ; X32-NEXT:    leal {{[0-9]+}}(%esp), %esi
@@ -894,7 +894,7 @@ define float @broadcast_lifetime() nounw
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: broadcast_lifetime:
-; X64:       ## BB#0:
+; X64:       ## %bb.0:
 ; X64-NEXT:    subq $40, %rsp
 ; X64-NEXT:    movq %rsp, %rdi
 ; X64-NEXT:    callq _gfunc

Modified: llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll Mon Dec  4 09:18:51 2017
@@ -4,13 +4,13 @@
 
 define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
 ; X32-LABEL: test_broadcast_2f64_4f64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2f64_4f64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
  %1 = load <2 x double>, <2 x double> *%p
@@ -20,13 +20,13 @@ define <4 x double> @test_broadcast_2f64
 
 define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
 ; X32-LABEL: test_broadcast_2i64_4i64:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2i64_4i64:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
  %1 = load <2 x i64>, <2 x i64> *%p
@@ -36,13 +36,13 @@ define <4 x i64> @test_broadcast_2i64_4i
 
 define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
 ; X32-LABEL: test_broadcast_4f32_8f32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4f32_8f32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
  %1 = load <4 x float>, <4 x float> *%p
@@ -52,13 +52,13 @@ define <8 x float> @test_broadcast_4f32_
 
 define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
 ; X32-LABEL: test_broadcast_4i32_8i32:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4i32_8i32:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
  %1 = load <4 x i32>, <4 x i32> *%p
@@ -68,13 +68,13 @@ define <8 x i32> @test_broadcast_4i32_8i
 
 define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
 ; X32-LABEL: test_broadcast_8i16_16i16:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_8i16_16i16:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16> *%p
@@ -84,13 +84,13 @@ define <16 x i16> @test_broadcast_8i16_1
 
 define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
 ; X32-LABEL: test_broadcast_16i8_32i8:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_16i8_32i8:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT:    retq
  %1 = load <16 x i8>, <16 x i8> *%p
@@ -100,7 +100,7 @@ define <32 x i8> @test_broadcast_16i8_32
 
 define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x double>* %p1) {
 ; X32-LABEL: test_broadcast_2f64_4f64_reuse:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm1
@@ -109,7 +109,7 @@ define <4 x double> @test_broadcast_2f64
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2f64_4f64_reuse:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm0
 ; X64-NEXT:    vmovaps %xmm1, (%rsi)
@@ -122,7 +122,7 @@ define <4 x double> @test_broadcast_2f64
 
 define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) {
 ; X32-LABEL: test_broadcast_2i64_4i64_reuse:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm1
@@ -131,7 +131,7 @@ define <4 x i64> @test_broadcast_2i64_4i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2i64_4i64_reuse:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm0
 ; X64-NEXT:    vmovaps %xmm1, (%rsi)
@@ -144,7 +144,7 @@ define <4 x i64> @test_broadcast_2i64_4i
 
 define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>* %p1) {
 ; X32-LABEL: test_broadcast_4f32_8f32_reuse:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm1
@@ -153,7 +153,7 @@ define <8 x float> @test_broadcast_4f32_
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4f32_8f32_reuse:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm0
 ; X64-NEXT:    vmovaps %xmm1, (%rsi)
@@ -166,7 +166,7 @@ define <8 x float> @test_broadcast_4f32_
 
 define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) {
 ; X32-LABEL: test_broadcast_4i32_8i32_reuse:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm1
@@ -175,7 +175,7 @@ define <8 x i32> @test_broadcast_4i32_8i
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4i32_8i32_reuse:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm0
 ; X64-NEXT:    vmovaps %xmm1, (%rsi)
@@ -188,7 +188,7 @@ define <8 x i32> @test_broadcast_4i32_8i
 
 define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p1) nounwind {
 ; X32-LABEL: test_broadcast_8i16_16i16_reuse:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm1
@@ -197,7 +197,7 @@ define <16 x i16> @test_broadcast_8i16_1
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_8i16_16i16_reuse:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm0
 ; X64-NEXT:    vmovaps %xmm1, (%rsi)
@@ -210,7 +210,7 @@ define <16 x i16> @test_broadcast_8i16_1
 
 define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) nounwind {
 ; X32-LABEL: test_broadcast_16i8_32i8_reuse:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm1
@@ -219,7 +219,7 @@ define <32 x i8> @test_broadcast_16i8_32
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_16i8_32i8_reuse:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm0
 ; X64-NEXT:    vmovaps %xmm1, (%rsi)
@@ -232,7 +232,7 @@ define <32 x i8> @test_broadcast_16i8_32
 
 define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
 ; X32-LABEL: PR29088:
-; X32:       # BB#0:
+; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovaps (%ecx), %xmm0
@@ -242,7 +242,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR29088:
-; X64:       # BB#0:
+; X64:       # %bb.0:
 ; X64-NEXT:    vmovaps (%rdi), %xmm0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64-NEXT:    vmovaps %ymm1, (%rsi)

Modified: llvm/trunk/test/CodeGen/X86/avx-vextractf128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vextractf128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vextractf128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vextractf128.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: A:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -13,7 +13,7 @@ entry:
 
 define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: B:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    retq
 entry:
@@ -23,7 +23,7 @@ entry:
 
 define void @t0(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t0:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -36,7 +36,7 @@ entry:
 
 define void @t2(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t2:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -49,7 +49,7 @@ entry:
 
 define void @t4(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t4:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -63,7 +63,7 @@ entry:
 
 define void @t5(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t5:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -76,7 +76,7 @@ entry:
 
 define void @t6(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t6:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -89,7 +89,7 @@ entry:
 
 define void @t7(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t7:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -103,7 +103,7 @@ entry:
 
 define void @t8(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
 ; CHECK-LABEL: t8:
-; CHECK:       ## BB#0: ## %entry
+; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    vmovups %xmm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
@@ -118,7 +118,7 @@ entry:
 ; PR15462
 define void @t9(i64* %p) {
 ; CHECK-LABEL: t9:
-; CHECK:       ## BB#0:
+; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovups %ymm0, (%rdi)
 ; CHECK-NEXT:    vzeroupper

Modified: llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 
 define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: A:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
@@ -12,7 +12,7 @@ define <8 x float> @A(<8 x float> %a) no
 
 define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: B:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 1>
@@ -24,7 +24,7 @@ declare <2 x double> @llvm.x86.sse2.min.
 
 define void @insert_crash() nounwind {
 ; CHECK-LABEL: insert_crash:
-; CHECK:       # BB#0: # %allocas
+; CHECK:       # %bb.0: # %allocas
 ; CHECK-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vminpd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vminsd %xmm0, %xmm0, %xmm0
@@ -49,7 +49,7 @@ allocas:
 
 define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
 ; CHECK-LABEL: DAGCombineA:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
   %t1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <8 x i32> %t1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -58,7 +58,7 @@ define <4 x i32> @DAGCombineA(<4 x i32>
 
 define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
 ; CHECK-LABEL: DAGCombineB:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; CHECK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
@@ -74,7 +74,7 @@ define <8 x i32> @DAGCombineB(<8 x i32>
 
 define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: insert_undef_pd:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
@@ -85,7 +85,7 @@ declare <4 x double> @llvm.x86.avx.vinse
 
 define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: insert_undef_ps:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
@@ -96,7 +96,7 @@ declare <8 x float> @llvm.x86.avx.vinser
 
 define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: insert_undef_si:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
@@ -108,7 +108,7 @@ declare <8 x i32> @llvm.x86.avx.vinsertf
 ; rdar://10643481
 define <8 x float> @vinsertf128_combine(float* nocapture %f) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: vinsertf128_combine:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %add.ptr = getelementptr inbounds float, float* %f, i64 4
@@ -121,7 +121,7 @@ define <8 x float> @vinsertf128_combine(
 ; rdar://11076953
 define <8 x float> @vinsertf128_ucombine(float* nocapture %f) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: vinsertf128_ucombine:
-; CHECK:       # BB#0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %add.ptr = getelementptr inbounds float, float* %f, i64 4

Modified: llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll Mon Dec  4 09:18:51 2017
@@ -3,7 +3,7 @@
 ; Check for vpclmulqdq
 define <4 x i64> @test_x86_pclmulqdq(<4 x i64> %a0, <4 x i64> %a1) {
 ; AVX_VPCLMULQDQ-LABEL: test_x86_pclmulqdq:
-; AVX_VPCLMULQDQ:       # BB#0:
+; AVX_VPCLMULQDQ:       # %bb.0:
 ; AVX_VPCLMULQDQ-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x44,0xc1,0x11]
 ; AVX_VPCLMULQDQ-NEXT:    retl # encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.pclmulqdq.256(<4 x i64> %a0, <4 x i64> %a1, i8 17)

Modified: llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll Mon Dec  4 09:18:51 2017
@@ -4,12 +4,12 @@
 
 define <8 x float> @shuffle_v8f32_45670123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_45670123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_45670123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-NEXT:    retq
 entry:
@@ -19,12 +19,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_45670123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_45670123_mem:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_45670123_mem:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = mem[2,3,0,1]
 ; AVX2-NEXT:    retq
 entry:
@@ -36,7 +36,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_0123cdef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_0123cdef:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; ALL-NEXT:    retq
 entry:
@@ -46,12 +46,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_01230123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_01230123:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_01230123:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX2-NEXT:    retq
 entry:
@@ -61,12 +61,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_01230123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_01230123_mem:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_01230123_mem:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX2-NEXT:    retq
 entry:
@@ -78,12 +78,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_45674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_45674567:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_45674567:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    retq
 entry:
@@ -93,12 +93,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_45674567_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_45674567_mem:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_45674567_mem:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = mem[2,3,2,3]
 ; AVX2-NEXT:    retq
 entry:
@@ -110,12 +110,12 @@ entry:
 
 define <32 x i8> @shuffle_v32i8_2323(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v32i8_2323:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_2323:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    retq
 entry:
@@ -125,7 +125,7 @@ entry:
 
 define <32 x i8> @shuffle_v32i8_2323_domain(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v32i8_2323_domain:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
@@ -134,7 +134,7 @@ define <32 x i8> @shuffle_v32i8_2323_dom
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v32i8_2323_domain:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -148,7 +148,7 @@ entry:
 
 define <4 x i64> @shuffle_v4i64_6701(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v4i64_6701:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
 ; ALL-NEXT:    retq
 entry:
@@ -158,14 +158,14 @@ entry:
 
 define <4 x i64> @shuffle_v4i64_6701_domain(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v4i64_6701_domain:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_6701_domain:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
@@ -179,7 +179,7 @@ entry:
 
 define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8i32_u5u7cdef:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
@@ -188,7 +188,7 @@ define <8 x i32> @shuffle_v8i32_u5u7cdef
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8i32_u5u7cdef:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
@@ -202,14 +202,14 @@ entry:
 
 define <16 x i16> @shuffle_v16i16_4501(<16 x i16> %a, <16 x i16> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v16i16_4501:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_4501:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsubw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -223,7 +223,7 @@ entry:
 
 define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v16i16_4501_mem:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <16 x i16> @shuffle_v16i16_4501_m
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_4501_mem:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
@@ -249,7 +249,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu67u9ub(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_uu67u9ub:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
 ; ALL-NEXT:    retq
 entry:
@@ -259,12 +259,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu67uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_uu67uu67:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_uu67uu67:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-NEXT:    retq
 entry:
@@ -274,7 +274,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu67uuab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_uu67uuab:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
 ; ALL-NEXT:    retq
 entry:
@@ -284,7 +284,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu67uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_uu67uuef:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; ALL-NEXT:    retq
 entry:
@@ -294,12 +294,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_uu674567:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_uu674567:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-NEXT:    retq
 entry:
@@ -309,7 +309,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu6789ab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_uu6789ab:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
 ; ALL-NEXT:    retq
 entry:
@@ -319,12 +319,12 @@ entry:
 
 define <8 x float> @shuffle_v8f32_4567uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: shuffle_v8f32_4567uu67:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8f32_4567uu67:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    retq
 entry:
@@ -334,7 +334,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_4567uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_4567uuef:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; ALL-NEXT:    retq
 entry:
@@ -346,7 +346,7 @@ entry:
 
 define <8 x float> @shuffle_v8f32_uu67ucuf(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: shuffle_v8f32_uu67ucuf:
-; ALL:       # BB#0: # %entry
+; ALL:       # %bb.0: # %entry
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
 ; ALL-NEXT:    retq
@@ -362,7 +362,7 @@ entry:
 
 define <4 x double> @shuffle_v4f64_zz01(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_zz01:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -370,7 +370,7 @@ define <4 x double> @shuffle_v4f64_zz01(
 }
 define <4 x double> @shuffle_v4f64_zz01_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_zz01_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
@@ -379,7 +379,7 @@ define <4 x double> @shuffle_v4f64_zz01_
 
 define <4 x double> @shuffle_v4f64_zz23(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_zz23:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; ALL-NEXT:    retq
@@ -388,7 +388,7 @@ define <4 x double> @shuffle_v4f64_zz23(
 }
 define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_zz23_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; ALL-NEXT:    retq
@@ -398,7 +398,7 @@ define <4 x double> @shuffle_v4f64_zz23_
 
 define <4 x double> @shuffle_v4f64_zz45(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_zz45:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -406,7 +406,7 @@ define <4 x double> @shuffle_v4f64_zz45(
 }
 define <4 x double> @shuffle_v4f64_zz45_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_zz45_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -415,7 +415,7 @@ define <4 x double> @shuffle_v4f64_zz45_
 
 define <4 x double> @shuffle_v4f64_zz67(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_zz67:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; ALL-NEXT:    retq
@@ -424,7 +424,7 @@ define <4 x double> @shuffle_v4f64_zz67(
 }
 define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_zz67_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; ALL-NEXT:    retq
@@ -434,7 +434,7 @@ define <4 x double> @shuffle_v4f64_zz67_
 
 define <4 x double> @shuffle_v4f64_01zz(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_01zz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; ALL-NEXT:    retq
@@ -443,7 +443,7 @@ define <4 x double> @shuffle_v4f64_01zz(
 }
 define <4 x double> @shuffle_v4f64_01zz_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_01zz_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; ALL-NEXT:    retq
@@ -453,7 +453,7 @@ define <4 x double> @shuffle_v4f64_01zz_
 
 define <4 x double> @shuffle_v4f64_23zz(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_23zz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -461,7 +461,7 @@ define <4 x double> @shuffle_v4f64_23zz(
 }
 define <4 x double> @shuffle_v4f64_23zz_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_23zz_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -470,7 +470,7 @@ define <4 x double> @shuffle_v4f64_23zz_
 
 define <4 x double> @shuffle_v4f64_45zz(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_45zz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; ALL-NEXT:    retq
@@ -479,7 +479,7 @@ define <4 x double> @shuffle_v4f64_45zz(
 }
 define <4 x double> @shuffle_v4f64_45zz_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_45zz_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; ALL-NEXT:    retq
@@ -489,7 +489,7 @@ define <4 x double> @shuffle_v4f64_45zz_
 
 define <4 x double> @shuffle_v4f64_67zz(<4 x double> %a) {
 ; ALL-LABEL: shuffle_v4f64_67zz:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
@@ -497,7 +497,7 @@ define <4 x double> @shuffle_v4f64_67zz(
 }
 define <4 x double> @shuffle_v4f64_67zz_optsize(<4 x double> %a) optsize {
 ; ALL-LABEL: shuffle_v4f64_67zz_optsize:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
@@ -508,14 +508,14 @@ define <4 x double> @shuffle_v4f64_67zz_
 
 define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_67zz:
-; AVX1:       # BB#0:
+; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
 ; AVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_67zz:
-; AVX2:       # BB#0:
+; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
 ; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
@@ -528,13 +528,13 @@ define <4 x i64> @shuffle_v4i64_67zz(<4
 
 define <4 x double> @ld0_hi0_lo1_4f64(<4 x double> * %pa, <4 x double> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld0_hi0_lo1_4f64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX1-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld0_hi0_lo1_4f64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
 ; AVX2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -548,13 +548,13 @@ entry:
 
 define <4 x double> @ld1_hi0_hi1_4f64(<4 x double> %a, <4 x double> * %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld1_hi0_hi1_4f64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX1-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld1_hi0_hi1_4f64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
 ; AVX2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -568,13 +568,13 @@ entry:
 
 define <8 x float> @ld0_hi0_lo1_8f32(<8 x float> * %pa, <8 x float> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld0_hi0_lo1_8f32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX1-NEXT:    vaddps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld0_hi0_lo1_8f32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
 ; AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
@@ -588,13 +588,13 @@ entry:
 
 define <8 x float> @ld1_hi0_hi1_8f32(<8 x float> %a, <8 x float> * %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld1_hi0_hi1_8f32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX1-NEXT:    vaddps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld1_hi0_hi1_8f32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
 ; AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
@@ -608,7 +608,7 @@ entry:
 
 define <4 x i64> @ld0_hi0_lo1_4i64(<4 x i64> * %pa, <4 x i64> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld0_hi0_lo1_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -617,7 +617,7 @@ define <4 x i64> @ld0_hi0_lo1_4i64(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld0_hi0_lo1_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX2-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -630,7 +630,7 @@ entry:
 
 define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld1_hi0_hi1_4i64:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX1-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
@@ -639,7 +639,7 @@ define <4 x i64> @ld1_hi0_hi1_4i64(<4 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld1_hi0_hi1_4i64:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX2-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -652,7 +652,7 @@ entry:
 
 define <8 x i32> @ld0_hi0_lo1_8i32(<8 x i32> * %pa, <8 x i32> %b) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld0_hi0_lo1_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
@@ -662,7 +662,7 @@ define <8 x i32> @ld0_hi0_lo1_8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld0_hi0_lo1_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -675,7 +675,7 @@ entry:
 
 define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld1_hi0_hi1_8i32:
-; AVX1:       # BB#0: # %entry
+; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
@@ -685,7 +685,7 @@ define <8 x i32> @ld1_hi0_hi1_8i32(<8 x
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ld1_hi0_hi1_8i32:
-; AVX2:       # BB#0: # %entry
+; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll Mon Dec  4 09:18:51 2017
@@ -15,7 +15,7 @@ declare <4 x float> @llvm.x86.avx.vextra
 
 define <4 x float> @test00(<4 x float> %a, <4 x float> %b) nounwind {
 ; ALL-LABEL: test00:
-; ALL:       # BB#0:
+; ALL:       # %bb.0:
 ; ALL-NEXT:    pushq %rax
 ; ALL-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; ALL-NEXT:    callq do_sse
@@ -30,7 +30,7 @@ define <4 x float> @test00(<4 x float> %
 
 define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounwind {
 ; VZ-LABEL: test01:
-; VZ:       # BB#0:
+; VZ:       # %bb.0:
 ; VZ-NEXT:    subq $56, %rsp
 ; VZ-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
 ; VZ-NEXT:    vmovaps {{.*}}(%rip), %xmm0
@@ -44,7 +44,7 @@ define <8 x float> @test01(<4 x float> %
 ; VZ-NEXT:    retq
 ;
 ; FAST-YMM-ZMM-LABEL: test01:
-; FAST-YMM-ZMM:       # BB#0:
+; FAST-YMM-ZMM:       # %bb.0:
 ; FAST-YMM-ZMM-NEXT:    subq $56, %rsp
 ; FAST-YMM-ZMM-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
 ; FAST-YMM-ZMM-NEXT:    vmovaps {{.*}}(%rip), %xmm0
@@ -57,7 +57,7 @@ define <8 x float> @test01(<4 x float> %
 ; FAST-YMM-ZMM-NEXT:    retq
 ;
 ; BTVER2-LABEL: test01:
-; BTVER2:       # BB#0:
+; BTVER2:       # %bb.0:
 ; BTVER2-NEXT:    subq $56, %rsp
 ; BTVER2-NEXT:    vmovaps {{.*}}(%rip), %xmm0
 ; BTVER2-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
@@ -80,14 +80,14 @@ define <8 x float> @test01(<4 x float> %
 
 define <4 x float> @test02(<8 x float> %a, <8 x float> %b) nounwind {
 ; VZ-LABEL: test02:
-; VZ:       # BB#0:
+; VZ:       # %bb.0:
 ; VZ-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; VZ-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; VZ-NEXT:    vzeroupper
 ; VZ-NEXT:    jmp do_sse # TAILCALL
 ;
 ; NO-VZ-LABEL: test02:
-; NO-VZ:       # BB#0:
+; NO-VZ:       # %bb.0:
 ; NO-VZ-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; NO-VZ-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
 ; NO-VZ-NEXT:    jmp do_sse # TAILCALL
@@ -102,7 +102,7 @@ define <4 x float> @test02(<8 x float> %
 
 define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; VZ-LABEL: test03:
-; VZ:       # BB#0: # %entry
+; VZ:       # %bb.0: # %entry
 ; VZ-NEXT:    pushq %rbx
 ; VZ-NEXT:    subq $16, %rsp
 ; VZ-NEXT:    vaddps %xmm1, %xmm0, %xmm0
@@ -113,7 +113,7 @@ define <4 x float> @test03(<4 x float> %
 ; VZ-NEXT:    callq foo
 ; VZ-NEXT:    testl %eax, %eax
 ; VZ-NEXT:    jne .LBB3_1
-; VZ-NEXT:  # BB#2: # %for.body.preheader
+; VZ-NEXT:  # %bb.2: # %for.body.preheader
 ; VZ-NEXT:    movl $4, %ebx
 ; VZ-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
 ; VZ-NEXT:    .p2align 4, 0x90
@@ -127,13 +127,13 @@ define <4 x float> @test03(<4 x float> %
 ; VZ-NEXT:    callq do_sse
 ; VZ-NEXT:    decl %ebx
 ; VZ-NEXT:    jne .LBB3_3
-; VZ-NEXT:  # BB#4: # %for.end
+; VZ-NEXT:  # %bb.4: # %for.end
 ; VZ-NEXT:    addq $16, %rsp
 ; VZ-NEXT:    popq %rbx
 ; VZ-NEXT:    retq
 ;
 ; FAST-YMM-ZMM-LABEL: test03:
-; FAST-YMM-ZMM:       # BB#0: # %entry
+; FAST-YMM-ZMM:       # %bb.0: # %entry
 ; FAST-YMM-ZMM-NEXT:    pushq %rbx
 ; FAST-YMM-ZMM-NEXT:    subq $16, %rsp
 ; FAST-YMM-ZMM-NEXT:    vaddps %xmm1, %xmm0, %xmm0
@@ -144,7 +144,7 @@ define <4 x float> @test03(<4 x float> %
 ; FAST-YMM-ZMM-NEXT:    callq foo
 ; FAST-YMM-ZMM-NEXT:    testl %eax, %eax
 ; FAST-YMM-ZMM-NEXT:    jne .LBB3_1
-; FAST-YMM-ZMM-NEXT:  # BB#2: # %for.body.preheader
+; FAST-YMM-ZMM-NEXT:  # %bb.2: # %for.body.preheader
 ; FAST-YMM-ZMM-NEXT:    movl $4, %ebx
 ; FAST-YMM-ZMM-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
 ; FAST-YMM-ZMM-NEXT:    .p2align 4, 0x90
@@ -157,13 +157,13 @@ define <4 x float> @test03(<4 x float> %
 ; FAST-YMM-ZMM-NEXT:    callq do_sse
 ; FAST-YMM-ZMM-NEXT:    decl %ebx
 ; FAST-YMM-ZMM-NEXT:    jne .LBB3_3
-; FAST-YMM-ZMM-NEXT:  # BB#4: # %for.end
+; FAST-YMM-ZMM-NEXT:  # %bb.4: # %for.end
 ; FAST-YMM-ZMM-NEXT:    addq $16, %rsp
 ; FAST-YMM-ZMM-NEXT:    popq %rbx
 ; FAST-YMM-ZMM-NEXT:    retq
 ;
 ; BTVER2-LABEL: test03:
-; BTVER2:       # BB#0: # %entry
+; BTVER2:       # %bb.0: # %entry
 ; BTVER2-NEXT:    pushq %rbx
 ; BTVER2-NEXT:    subq $16, %rsp
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
@@ -174,7 +174,7 @@ define <4 x float> @test03(<4 x float> %
 ; BTVER2-NEXT:    callq foo
 ; BTVER2-NEXT:    testl %eax, %eax
 ; BTVER2-NEXT:    jne .LBB3_1
-; BTVER2-NEXT:  # BB#2: # %for.body.preheader
+; BTVER2-NEXT:  # %bb.2: # %for.body.preheader
 ; BTVER2-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
 ; BTVER2-NEXT:    movl $4, %ebx
 ; BTVER2-NEXT:    .p2align 4, 0x90
@@ -187,7 +187,7 @@ define <4 x float> @test03(<4 x float> %
 ; BTVER2-NEXT:    callq do_sse
 ; BTVER2-NEXT:    decl %ebx
 ; BTVER2-NEXT:    jne .LBB3_3
-; BTVER2-NEXT:  # BB#4: # %for.end
+; BTVER2-NEXT:  # %bb.4: # %for.end
 ; BTVER2-NEXT:    addq $16, %rsp
 ; BTVER2-NEXT:    popq %rbx
 ; BTVER2-NEXT:    retq
@@ -220,7 +220,7 @@ for.end:
 
 define <4 x float> @test04(<4 x float> %a, <4 x float> %b) nounwind {
 ; VZ-LABEL: test04:
-; VZ:       # BB#0:
+; VZ:       # %bb.0:
 ; VZ-NEXT:    pushq %rax
 ; VZ-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; VZ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -231,7 +231,7 @@ define <4 x float> @test04(<4 x float> %
 ; VZ-NEXT:    retq
 ;
 ; NO-VZ-LABEL: test04:
-; NO-VZ:       # BB#0:
+; NO-VZ:       # %bb.0:
 ; NO-VZ-NEXT:    pushq %rax
 ; NO-VZ-NEXT:    # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
 ; NO-VZ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
